# Dataset columns:
#   code                    : string, 81 - 54k chars
#   code_codestyle          : int64, 0 - 721
#   style_context           : string, 91 - 41.9k chars
#   style_context_codestyle : int64, 0 - 699
#   label                   : int64, 0 - 1
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize any nested GenerationConfig so the result is plain JSON-friendly data.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
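# --- Usage sketch (not part of the source file above) ----------------------
# A minimal example, assuming `transformers` (with torch) is installed; the
# same class is exposed publicly as `transformers.Seq2SeqTrainingArguments`.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",            # required by the base TrainingArguments
    predict_with_generate=True,  # evaluate with model.generate()
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()["generation_num_beams"])  # -> 4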
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
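# --- Usage sketch (not part of the source file above) ----------------------
# Instantiating the deprecated class emits the FutureWarning; new code should
# use YolosImageProcessor directly. Assumes `transformers` is installed and
# the `hustvl/yolos-small` checkpoint can be downloaded.
import warnings

from transformers import YolosFeatureExtractor, YolosImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = YolosFeatureExtractor.from_pretrained("hustvl/yolos-small")
print([w.category.__name__ for w in caught])  # includes 'FutureWarning'

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")  # preferred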
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
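# --- Input format sketch (not part of the script above) --------------------
# The parsing loop above consumes JSON-lines test logs; the fields it reads
# match what pytest's (experimental) `--report-log=<name>.log` flag emits.
# A minimal line it can parse, trimmed to the fields used (assumed layout):
import json

sample = '{"nodeid": "tests/test_cli.py::TestCLI::test_help", "duration": 0.4213, "outcome": "failed"}'
line = json.loads(sample)
assert line.get("nodeid", "") != "" and line.get("outcome", "") == "failed"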
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
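# --- Standalone sketch (not part of the test file above) -------------------
# The shape assertion in create_and_check_xlm_model boils down to this tiny
# XLM forward pass; assumes `torch` and `transformers` are installed.
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, max_position_embeddings=512)
model = XLMModel(config).eval()
input_ids = torch.randint(0, 99, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    out = model(input_ids)
print(out.last_hidden_state.shape)  # torch.Size([13, 7, 32])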
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Gets a set of train, valid, and test dataloaders for a particular fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
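# --- Ensembling sketch (not part of the script above) ----------------------
# The final "Average test metrics" step reduces to soft-voting: stack the
# per-fold logits, average them, then argmax. A self-contained toy version,
# assuming only torch:
import torch

num_folds, n_examples, n_classes = 3, 4, 2
fold_logits = [torch.randn(n_examples, n_classes) for _ in range(num_folds)]
avg = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)  # mean logits
preds = avg.argmax(dim=-1)
print(preds.shape)  # torch.Size([4])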
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    """Seed all random number generators for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily store the current parameters so they can be restored after evaluation."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved with `store()`."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        # deep-copy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
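# --- Usage sketch (not part of the module above) ---------------------------
# End-to-end reader flow, assuming `transformers` and `torch` are installed
# (weights are downloaded on first use).
import torch
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway.",
    return_tensors="pt",
)
with torch.no_grad():
    outputs = model(**encoded)
spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)
print(spans[0].text)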
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 0 |
import numpy as np
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : str =(0, 0)
A__ : List[Any] =None
A__ : Dict =0
A__ : List[str] =0
A__ : Dict =0
def __eq__( self : Tuple , lowerCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
return self.position == cell.position
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
print(self.position )
class lowerCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any]=(5, 5) ) -> int:
'''simple docstring'''
A__ : Tuple =np.zeros(__lowerCamelCase )
A__ : str =world_size[0]
A__ : Dict =world_size[1]
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
print(self.w )
def lowercase__ ( self : str , lowerCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
A__ : Any =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
A__ : Optional[Any] =cell.position[0]
A__ : Optional[Any] =cell.position[1]
A__ : Tuple =[]
        for n in neighbour_cord:
A__ : Any =current_x + n[0]
A__ : Union[str, Any] =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
A__ : Optional[Any] =Cell()
A__ : Union[str, Any] =(x, y)
A__ : List[Any] =cell
neighbours.append(__lowerCamelCase )
return neighbours
def __lowerCamelCase ( __snake_case : Any, __snake_case : Optional[int], __snake_case : int ) -> Any:
"""simple docstring"""
A__ : Union[str, Any] =[]
A__ : List[str] =[]
_open.append(_lowerCamelCase )
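    # A* main loop: repeatedly expand the open node with the lowest f = g + h.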
while _open:
A__ : List[str] =np.argmin([n.f for n in _open] )
A__ : Dict =_open[min_f]
_closed.append(_open.pop(_lowerCamelCase ) )
if current == goal:
break
        for n in world.get_neighbours(_lowerCamelCase ):
for c in _closed:
if c == n:
continue
A__ : Optional[int] =current.g + 1
            # Heuristic: squared Euclidean distance between n and the goal.
            A__ : List[str] =(goal.position[1] - n.position[1]) ** 2 + (goal.position[0] - n.position[0]) ** 2
A__ : List[str] =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCamelCase )
A__ : int =[]
while current.parent is not None:
path.append(current.position )
A__ : List[Any] =current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__snake_case : List[Any] = Gridworld()
# Start position and goal
__snake_case : List[Any] = Cell()
__snake_case : Optional[int] = (0, 0)
__snake_case : Optional[int] = Cell()
__snake_case : List[Any] = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
__snake_case : Optional[Any] = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__snake_case : Optional[int] = 1
print(world.w)
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
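        # Shard filenames are assumed to end in `-<shard>-<num_samples>.tfrecord`;
        # the trailing number is taken as the sample count for that shard.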
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
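        # Padding, CLS and SEP positions are flagged so they are never masked below.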
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
__snake_case = 'levit'
def __init__( self : Optional[Any] , lowerCAmelCase_ : int=2_24 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : Dict=[1_28, 2_56, 3_84] , lowerCAmelCase_ : Any=[4, 8, 12] , lowerCAmelCase_ : List[Any]=[4, 4, 4] , lowerCAmelCase_ : Union[str, Any]=[16, 16, 16] , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Tuple=[2, 2, 2] , lowerCAmelCase_ : Tuple=[2, 2, 2] , lowerCAmelCase_ : List[str]=0.02 , **lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
A__ : List[Any] =image_size
A__ : List[str] =num_channels
A__ : Any =kernel_size
A__ : Tuple =stride
A__ : Dict =padding
A__ : int =hidden_sizes
A__ : Optional[int] =num_attention_heads
A__ : Tuple =depths
A__ : List[Any] =key_dim
A__ : Optional[int] =drop_path_rate
A__ : Tuple =patch_size
A__ : List[str] =attention_ratio
A__ : Optional[Any] =mlp_ratio
A__ : str =initializer_range
A__ : List[str] =[
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowercase__ ( self : str ) -> float:
'''simple docstring'''
return 1e-4
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__snake_case : Tuple = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
__snake_case = ['audio_values', 'audio_mask']
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]=20_48 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Any=[16, 16] , lowerCAmelCase_ : Optional[int]=1_28 , lowerCAmelCase_ : Union[str, Any]=4_41_00 , lowerCAmelCase_ : List[str]=86 , lowerCAmelCase_ : int=20_48 , lowerCAmelCase_ : Optional[Any]=0.0 , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase , )
A__ : int =spectrogram_length
A__ : Optional[Any] =num_channels
A__ : Optional[Any] =patch_size
A__ : Any =feature_size // self.patch_size[1]
A__ : List[Any] =n_fft
A__ : Optional[Any] =sampling_rate // hop_length_to_sampling_rate
A__ : Optional[Any] =sampling_rate
A__ : Any =padding_value
A__ : Tuple =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCamelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__UpperCamelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def lowercase__ ( self : Dict , lowerCAmelCase_ : np.array ) -> np.ndarray:
'''simple docstring'''
A__ : int =spectrogram(
__UpperCamelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
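        # Normalize the log-mel spectrogram: drop the last frame, shift the dB scale,
        # then squash values into [-1.0, 1.0].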
A__ : Union[str, Any] =log_spec[:, :-1]
A__ : Any =log_spec - 20.0
A__ : Union[str, Any] =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[bool] = True , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : str , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A__ : Optional[Any] =isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A__ : Dict =is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ : Tuple =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
A__ : Any =np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ : str =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ : List[str] =[np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A__ : Union[str, Any] =[
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __UpperCamelCase ):
A__ : str =[np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A__ : Dict =max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A__ : Union[str, Any] =[
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A__ : Tuple =np.array(__UpperCamelCase ).astype(np.floataa )
# convert into correct format for padding
A__ : int =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A__ : Optional[int] =np.ones([len(__UpperCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A__ : Any =padded_audio_features * self.padding_value
for i in range(len(__UpperCamelCase ) ):
A__ : int =audio_features[i]
A__ : Tuple =feature
# return as BatchFeature
if return_attention_mask:
A__ : Union[str, Any] ={"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
A__ : Tuple ={"""audio_values""": padded_audio_features}
A__ : List[Any] =BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
return encoded_inputs
| 719 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCamelCase :
def __init__( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Any=99 , lowerCAmelCase_ : Optional[Any]=[1, 1, 2] , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : int="gelu_new" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Tuple=5_12 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Tuple=False , ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =parent
A__ : Dict =batch_size
A__ : Optional[int] =seq_length
A__ : str =is_training
A__ : Tuple =use_input_mask
A__ : Union[str, Any] =use_token_type_ids
A__ : List[Any] =use_labels
A__ : Dict =vocab_size
A__ : Union[str, Any] =block_sizes
A__ : str =num_decoder_layers
A__ : List[str] =d_model
A__ : Tuple =n_head
A__ : Union[str, Any] =d_head
A__ : Any =d_inner
A__ : Union[str, Any] =hidden_act
A__ : str =hidden_dropout
A__ : str =attention_dropout
A__ : Dict =activation_dropout
A__ : Tuple =max_position_embeddings
A__ : Tuple =type_vocab_size
A__ : List[Any] =2
A__ : Any =num_labels
A__ : Optional[int] =num_choices
A__ : int =scope
A__ : Any =initializer_std
# Used in the tests to check the size of the first attention layer
A__ : List[str] =n_head
# Used in the tests to check the size of the first hidden state
A__ : Optional[int] =self.d_model
# Used in the tests to check the number of output hidden states/attentions
A__ : List[Any] =sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A__ : Optional[Any] =self.num_hidden_layers + 2
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : int =None
if self.use_input_mask:
A__ : int =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] =None
if self.use_token_type_ids:
A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Tuple =None
A__ : List[str] =None
A__ : str =None
if self.use_labels:
A__ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[int] =FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , ) -> Optional[int]:
'''simple docstring'''
A__ : int =TFFunnelModel(config=snake_case_ )
A__ : Tuple ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : Tuple =model(snake_case_ )
A__ : Optional[Any] =[input_ids, input_mask]
A__ : Union[str, Any] =model(snake_case_ )
A__ : Optional[int] =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A__ : str =False
A__ : Union[str, Any] =TFFunnelModel(config=snake_case_ )
A__ : List[Any] =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A__ : Any =False
A__ : int =TFFunnelModel(config=snake_case_ )
A__ : Tuple =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =TFFunnelBaseModel(config=snake_case_ )
A__ : Tuple ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : List[str] =model(snake_case_ )
A__ : str =[input_ids, input_mask]
A__ : Tuple =model(snake_case_ )
A__ : List[str] =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
A__ : Any =False
A__ : Optional[int] =TFFunnelBaseModel(config=snake_case_ )
A__ : List[Any] =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
A__ : Dict =False
A__ : str =TFFunnelBaseModel(config=snake_case_ )
A__ : Tuple =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =TFFunnelForPreTraining(config=snake_case_ )
A__ : Union[str, Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : int =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[Any] =TFFunnelForMaskedLM(config=snake_case_ )
A__ : str ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : Dict =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
A__ : Tuple =self.num_labels
A__ : Optional[int] =TFFunnelForSequenceClassification(config=snake_case_ )
A__ : int ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : Optional[Any] =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] =self.num_choices
A__ : Any =TFFunnelForMultipleChoice(config=snake_case_ )
A__ : List[str] =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
A__ : Dict =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
A__ : str =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
A__ : Optional[Any] ={
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A__ : Union[str, Any] =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.num_labels
A__ : Dict =TFFunnelForTokenClassification(config=snake_case_ )
A__ : Tuple ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : Union[str, Any] =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[Any] =TFFunnelForQuestionAnswering(config=snake_case_ )
A__ : int ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : int =model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
A__ : List[str] =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ : Any =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _a , _a , unittest.TestCase ):
__snake_case = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : List[Any] =TFFunnelModelTester(self )
A__ : Dict =ConfigTester(self , config_class=snake_case_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@require_tf
class lowerCamelCase ( _a , unittest.TestCase ):
__snake_case = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__snake_case = False
__snake_case = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
A__ : List[Any] =TFFunnelModelTester(self , base=snake_case_ )
A__ : List[str] =ConfigTester(self , config_class=snake_case_ )
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case_ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__snake_case : List[str] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
__snake_case : Optional[int] = []
__snake_case : int = []
__snake_case : Optional[int] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
__snake_case : str = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'''emoji''': True,
},
}
]
__snake_case : Union[str, Any] = 0
for log in Path().glob('*.log'):
__snake_case : Dict = 0
with open(log, 'r') as f:
for line in f:
__snake_case : Dict = json.loads(line)
if line.get('nodeid', '') != "":
__snake_case : Optional[int] = line['''nodeid''']
if line.get('duration', None) is not None:
__snake_case : int = F"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__snake_case : Dict = []
log.unlink()
__snake_case : Union[str, Any] = ''''''
__snake_case : Union[str, Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__snake_case : List[str] = []
__snake_case : Optional[int] = {}
for test in failed_tests:
__snake_case : Tuple = test[0].split('::')
__snake_case : Any = data[0].split('/')[-1]
if data[0] not in filesafailed:
__snake_case : Union[str, Any] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__snake_case : str = [test[0] for test in failed_table]
__snake_case : Optional[int] = list(set(files))
# Count number of instances in failed_tests
__snake_case : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__snake_case : List[Any] = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__snake_case : List[str] = '''Too many failed tests, please see the full report in the Action results.'''
__snake_case : List[Any] = len(err) + 10
__snake_case : int = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
__snake_case : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
__snake_case : List[str] = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
__snake_case : List[str] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
__snake_case : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
__snake_case : List[str] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__snake_case : Optional[Any] = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
__snake_case : Union[str, Any] = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__snake_case : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__snake_case : List[str] = row[0]
else:
__snake_case : Optional[Any] = ''''''
__snake_case : Union[str, Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
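        # After warmup, `progress` runs linearly from 0 to 1; with the default
        # num_cycles=0.5 the cosine decays the multiplier from 1.0 down to 0.0.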
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
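# Minimal usage sketch for the factory above (named `get_scheduler` in the original
# transformers source), assuming a torch.optim optimizer called `optimizer`:
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1_000)
#   for _ in range(1_000):
#       optimizer.step()
#       scheduler.step()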
| 687 | 0 |
'''simple docstring'''
import argparse
import copy
def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] ={}
with open(snake_case__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
A__ : Any =[]
_list.append([line.split()[1], line.split()[2]] )
A__ : List[str] =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
A__ : Any =[]
_list.append([line.split()[0], line.split()[2]] )
A__ : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __lowerCamelCase ( __snake_case : str, __snake_case : Any ) -> List[Any]:
"""simple docstring"""
with open(snake_case__ ) as f:
A__ : Union[str, Any] =f.read(1 )
A__ : List[Any] =start_node
A__ : Dict =[]
A__ : Union[str, Any] =start_node
A__ : Optional[Any] =0
while visiting not in first_solution:
A__ : Dict =10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(snake_case__ ) and k[0] not in first_solution:
A__ : Optional[Any] =k[1]
A__ : Optional[Any] =k[0]
first_solution.append(snake_case__ )
A__ : int =distance_of_first_solution + int(snake_case__ )
A__ : Optional[int] =best_node
first_solution.append(snake_case__ )
A__ : List[Any] =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
A__ : Dict =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Tuple ) -> Dict:
"""simple docstring"""
A__ : Union[str, Any] =[]
for n in solution[1:-1]:
A__ : Tuple =solution.index(snake_case__ )
for kn in solution[1:-1]:
A__ : Dict =solution.index(snake_case__ )
if n == kn:
continue
A__ : Optional[Any] =copy.deepcopy(snake_case__ )
A__ : Optional[Any] =kn
A__ : Any =n
A__ : Optional[int] =0
for k in _tmp[:-1]:
A__ : List[str] =_tmp[_tmp.index(snake_case__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
A__ : Dict =distance + int(i[1] )
_tmp.append(snake_case__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
A__ : List[str] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __snake_case : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Union[str, Any], __snake_case : Union[str, Any], __snake_case : Any, __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] =1
A__ : Union[str, Any] =first_solution
A__ : Optional[int] =[]
A__ : Union[str, Any] =distance_of_first_solution
A__ : Union[str, Any] =solution
while count <= iters:
A__ : Optional[int] =find_neighborhood(snake_case__, snake_case__ )
A__ : Union[str, Any] =0
A__ : Union[str, Any] =neighborhood[index_of_best_solution]
A__ : Any =len(snake_case__ ) - 1
A__ : int =False
while not found:
A__ : Union[str, Any] =0
while i < len(snake_case__ ):
if best_solution[i] != solution[i]:
A__ : List[str] =best_solution[i]
A__ : Tuple =solution[i]
break
A__ : int =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
A__ : Optional[int] =True
A__ : Optional[int] =best_solution[:-1]
A__ : Optional[int] =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
A__ : str =cost
A__ : List[str] =solution
else:
A__ : Dict =index_of_best_solution + 1
A__ : List[Any] =neighborhood[index_of_best_solution]
if len(snake_case__ ) >= size:
tabu_list.pop(0 )
A__ : List[Any] =count + 1
return best_solution_ever, best_cost
def __lowerCamelCase ( __snake_case : List[str]=None ) -> int:
"""simple docstring"""
A__ : Any =generate_neighbours(args.File )
A__ , A__ : List[Any] =generate_first_solution(
args.File, snake_case__ )
A__ , A__ : Optional[Any] =tabu_search(
snake_case__, snake_case__, snake_case__, args.Iterations, args.Size, )
print(f"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
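The tabu search above derives each candidate tour by exchanging two interior nodes and blocks recently used exchange pairs through a bounded tabu list. A self-contained sketch of that exchange neighborhood, with illustrative names rather than the ones in the code above:

import copy

def two_opt_neighbours(solution):
    # Every tour reachable by swapping two interior cities, mirroring the
    # exchange move used by find_neighborhood above.
    neighbours = []
    for i in range(1, len(solution) - 1):
        for j in range(i + 1, len(solution) - 1):
            candidate = copy.deepcopy(solution)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbours.append(candidate)
    return neighbours

print(two_opt_neighbours(["a", "b", "c", "d", "a"]))
# [['a', 'c', 'b', 'd', 'a'], ['a', 'd', 'c', 'b', 'a'], ['a', 'b', 'd', 'c', 'a']]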
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : Optional[int] = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
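Both module files above register an `_import_structure` mapping and hand it to `_LazyModule`, so heavy submodules only load on first attribute access. A minimal sketch of that deferred-import idea, assuming a simplified structure; the real `_LazyModule` also handles module specs, TYPE_CHECKING, and extras.

import importlib
import types

class LazyModule(types.ModuleType):
    # Submodules are imported only when one of their attributes is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# A package __init__ would then end with something like:
# import sys
# sys.modules[__name__] = LazyModule(__name__, _import_structure)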
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[str], __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
A__ : List[str] =[image]
if isinstance(image[0], PIL.Image.Image ):
A__ : Any =[np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
A__ : Optional[int] =np.concatenate(__snake_case, axis=0 )
A__ : Optional[Any] =np.array(__snake_case ).astype(np.floataa ) / 2_55.0
A__ : Optional[Any] =image.transpose(0, 3, 1, 2 )
A__ : int =2.0 * image - 1.0
A__ : Optional[Any] =torch.from_numpy(__snake_case )
elif isinstance(image[0], torch.Tensor ):
A__ : Any =torch.cat(__snake_case, dim=0 )
return image
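# Spherical linear interpolation: blends two vectors along the arc between them,
# falling back to plain linear interpolation when they are nearly parallel
# (|cos| above DOT_THRESHOLD). Used below for text embeddings, latents, and CLIP features.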
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[int], __snake_case : Any, __snake_case : Dict=0.99_95 ) -> List[str]:
"""simple docstring"""
if not isinstance(__snake_case, np.ndarray ):
A__ : Optional[int] =True
A__ : Union[str, Any] =va.device
A__ : Optional[Any] =va.cpu().numpy()
A__ : int =va.cpu().numpy()
A__ : Union[str, Any] =np.sum(va * va / (np.linalg.norm(__snake_case ) * np.linalg.norm(__snake_case )) )
if np.abs(__snake_case ) > DOT_THRESHOLD:
A__ : Tuple =(1 - t) * va + t * va
else:
A__ : str =np.arccos(__snake_case )
A__ : Any =np.sin(__snake_case )
A__ : Tuple =theta_a * t
A__ : Dict =np.sin(__snake_case )
A__ : List[Any] =np.sin(theta_a - theta_t ) / sin_theta_a
A__ : Optional[Any] =sin_theta_t / sin_theta_a
A__ : List[str] =sa * va + sa * va
if inputs_are_torch:
A__ : Tuple =torch.from_numpy(__snake_case ).to(__snake_case )
return va
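# Squared geodesic distance between L2-normalized embeddings; this is the
# CLIP-guidance loss minimized in cond_fn further down.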
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : str ) -> Optional[Any]:
"""simple docstring"""
A__ : Union[str, Any] =F.normalize(__snake_case, dim=-1 )
A__ : List[str] =F.normalize(__snake_case, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : str ) -> Optional[int]:
"""simple docstring"""
for param in model.parameters():
A__ : Optional[Any] =value
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , clip_model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , coca_model=lowerCAmelCase_ , coca_tokenizer=lowerCAmelCase_ , coca_transform=lowerCAmelCase_ , )
A__ : Tuple =(
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase_ )
else feature_extractor.size["""shortest_edge"""]
)
A__ : str =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase_ )
set_requires_grad(self.clip_model , lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[Any] = "auto" ) -> Dict:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ : Tuple =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
set_requires_grad(self.vae , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
set_requires_grad(self.vae , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
set_requires_grad(self.unet , lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
set_requires_grad(self.unet , lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
# get the original timestep using init_timestep
A__ : str =min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
A__ : Optional[int] =max(num_inference_steps - init_timestep , 0 )
A__ : Dict =self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=None ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase_ )}" )
A__ : Optional[Any] =image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Dict =[
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
A__ : int =torch.cat(lowerCAmelCase_ , dim=0 )
else:
A__ : List[str] =self.vae.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
A__ : Optional[Any] =0.18215 * init_latents
A__ : List[Any] =init_latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
A__ : List[str] =randn_tensor(init_latents.shape , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
A__ : Optional[int] =self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : str =init_latents
return latents
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] =self.coca_transform(lowerCAmelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
A__ : Tuple =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
A__ : Tuple =self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =self.feature_extractor.preprocess(lowerCAmelCase_ )
A__ : Optional[int] =torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
A__ : Union[str, Any] =self.clip_model.get_image_features(lowerCAmelCase_ )
A__ : List[str] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ )
A__ : List[str] =image_embeddings_clip.repeat_interleave(lowerCAmelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , ) -> Dict:
'''simple docstring'''
A__ : int =latents.detach().requires_grad_()
A__ : Tuple =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
A__ : str =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
A__ : int =self.scheduler.alphas_cumprod[timestep]
A__ : Any =1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A__ : str =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
A__ : List[Any] =torch.sqrt(lowerCAmelCase_ )
A__ : Union[str, Any] =pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCAmelCase_ ):
A__ : List[str] =self.scheduler.sigmas[index]
A__ : List[Any] =latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
A__ : Optional[int] =1 / 0.18215 * sample
A__ : int =self.vae.decode(lowerCAmelCase_ ).sample
A__ : int =(image / 2 + 0.5).clamp(0 , 1 )
A__ : Union[str, Any] =transforms.Resize(self.feature_extractor_size )(lowerCAmelCase_ )
A__ : Union[str, Any] =self.normalize(lowerCAmelCase_ ).to(latents.dtype )
A__ : Dict =self.clip_model.get_image_features(lowerCAmelCase_ )
A__ : Optional[Any] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ )
A__ : Optional[Any] =spherical_dist_loss(lowerCAmelCase_ , lowerCAmelCase_ ).mean() * clip_guidance_scale
A__ : List[Any] =-torch.autograd.grad(lowerCAmelCase_ , lowerCAmelCase_ )[0]
if isinstance(self.scheduler , lowerCAmelCase_ ):
A__ : Optional[int] =latents.detach() + grads * (sigma**2)
A__ : Optional[int] =noise_pred_original
else:
A__ : Tuple =noise_pred_original - torch.sqrt(lowerCAmelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[Any] = 5_12 , lowerCAmelCase_ : List[Any] = 5_12 , lowerCAmelCase_ : Any = 0.6 , lowerCAmelCase_ : List[Any] = 50 , lowerCAmelCase_ : int = 7.5 , lowerCAmelCase_ : Any = 1 , lowerCAmelCase_ : List[str] = 0.0 , lowerCAmelCase_ : List[Any] = 1_00 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : int = "pil" , lowerCAmelCase_ : Optional[Any] = True , lowerCAmelCase_ : Any = 0.8 , lowerCAmelCase_ : List[Any] = 0.1 , lowerCAmelCase_ : List[Any] = 0.1 , ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(lowerCAmelCase_ )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(lowerCAmelCase_ , torch.Generator ) and batch_size > 1:
A__ : str =[generator] + [None] * (batch_size - 1)
A__ : List[Any] =[
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
A__ : Dict =[x[0] for x in coca_is_none if x[1]]
A__ : Union[str, Any] =""", """.join(lowerCAmelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase_ ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
A__ : Tuple =self.get_image_description(lowerCAmelCase_ )
if style_prompt is None:
if len(lowerCAmelCase_ ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
A__ : Optional[Any] =self.get_image_description(lowerCAmelCase_ )
# get prompt text embeddings for content and style
A__ : Optional[Any] =self.tokenizer(
lowerCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""pt""" , )
A__ : str =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
A__ : Union[str, Any] =self.tokenizer(
lowerCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""pt""" , )
A__ : Any =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
A__ : Optional[Any] =slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# duplicate text embeddings for each generation per prompt
A__ : Optional[Any] =text_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 )
# set timesteps
A__ : Optional[Any] ="""offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
A__ : Optional[Any] ={}
if accepts_offset:
A__ : Any =1
self.scheduler.set_timesteps(lowerCAmelCase_ , **lowerCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
A__ , A__ : int =self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device )
A__ : Tuple =timesteps[:1].repeat(lowerCAmelCase_ )
# Preprocess image
A__ : Dict =preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ )
A__ : int =preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ )
A__ : Dict =slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if clip_guidance_scale > 0:
A__ : List[Any] =self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =slerp(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ : Any =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ : Tuple =content_text_input.input_ids.shape[-1]
A__ : List[str] =self.tokenizer([""""""] , padding="""max_length""" , max_length=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
A__ : List[Any] =uncond_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ : Union[str, Any] =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ : List[Any] =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
A__ : str =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
A__ : int =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="""cpu""" , dtype=lowerCAmelCase_ ).to(
self.device )
else:
A__ : Optional[Any] =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A__ : Tuple =latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ : List[Any] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ : int ="""eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ : Union[str, Any] ={}
if accepts_eta:
A__ : Tuple =eta
# check if the scheduler accepts generator
A__ : str ="""generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
A__ : Tuple =generator
with self.progress_bar(total=lowerCAmelCase_ ):
for i, t in enumerate(lowerCAmelCase_ ):
# expand the latents if we are doing classifier free guidance
A__ : str =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ : List[str] =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
A__ : Optional[Any] =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
A__ , A__ : int =noise_pred.chunk(2 )
A__ : Optional[int] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
A__ : List[str] =(
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
A__ , A__ : Any =self.cond_fn(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
A__ : List[str] =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
A__ : List[Any] =1 / 0.18215 * latents
A__ : Tuple =self.vae.decode(lowerCAmelCase_ ).sample
A__ : Dict =(image / 2 + 0.5).clamp(0 , 1 )
A__ : Tuple =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ : str =self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
| 702 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 687 | 0 |
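The pipeline two rows above leans on `slerp` to blend content and style text embeddings, latents, and CLIP image embeddings. A cleaned-up, self-contained sketch of spherical interpolation for plain NumPy vectors; the names are mine, and the threshold mirrors the default used above.

import numpy as np

def slerp(t, v0, v1, dot_threshold=0.9995):
    # Interpolate along the arc between v0 and v1; fall back to lerp when the
    # vectors are nearly parallel and arccos becomes numerically unstable.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
print(slerp(0.5, a, b))  # ~[0.7071, 0.7071]: stays on the unit circle, unlike lerp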
'''simple docstring'''
from math import factorial
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
A__ : str =real
if isinstance(__A , __A ):
A__ : Optional[Any] =[1] * rank
else:
A__ : int =rank
def __repr__( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return (
f"{self.real}+"
f"{'+'.join(str(__A )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __A )
def __add__( self : List[Any] , lowerCAmelCase_ : Dict ) -> str:
'''simple docstring'''
if not isinstance(__A , __A ):
return Dual(self.real + other , self.duals )
A__ : str =self.duals.copy()
A__ : List[str] =other.duals.copy()
if len(__A ) > len(__A ):
o_dual.extend([1] * (len(__A ) - len(__A )) )
elif len(__A ) < len(__A ):
s_dual.extend([1] * (len(__A ) - len(__A )) )
A__ : str =[]
for i in range(len(__A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __A )
__snake_case = __add__
def __sub__( self : Optional[int] , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
return self + other * -1
def __mul__( self : List[str] , lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
if not isinstance(__A , __A ):
A__ : str =[]
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __A )
A__ : Tuple =[0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __A )
__snake_case = __mul__
def __truediv__( self : Dict , lowerCAmelCase_ : Optional[int] ) -> int:
'''simple docstring'''
if not isinstance(__A , __A ):
A__ : Union[str, Any] =[]
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __A )
raise ValueError
def __floordiv__( self : Tuple , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if not isinstance(__A , __A ):
A__ : List[str] =[]
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __A )
raise ValueError
def __pow__( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
if n < 0 or isinstance(__A , __A ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
A__ : Any =self
for _ in range(n - 1 ):
x *= self
return x
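# Forward-mode differentiation: seed the position as Dual(position, 1), push it
# through func, then read the order-th derivative off the dual coefficients
# (scaled by order!).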
def __lowerCamelCase ( __snake_case : int, __snake_case : Optional[int], __snake_case : int ) -> Any:
"""simple docstring"""
if not callable(snake_case_ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(snake_case_, (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(snake_case_, snake_case_ ):
raise ValueError("""differentiate() requires an int as input for order""" )
A__ : Optional[Any] =Dual(snake_case_, 1 )
A__ : Optional[int] =func(snake_case_ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __lowerCamelCase ( __snake_case : List[str] ) -> int:
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
| 703 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 687 | 0 |
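The class two rows above generalizes dual numbers to higher-order duals; the core trick is easiest to see at first order, where f(a + eps) = f(a) + f'(a)*eps because eps squared vanishes. A minimal first-order sketch, with illustrative names:

class D:
    # A dual number real + eps*E with E*E = 0.
    def __init__(self, real, eps=0.0):
        self.real, self.eps = real, eps
    def __add__(self, other):
        other = other if isinstance(other, D) else D(other)
        return D(self.real + other.real, self.eps + other.eps)
    __radd__ = __add__
    def __mul__(self, other):
        other = other if isinstance(other, D) else D(other)
        # Product rule falls out of (a + bE)(c + dE) = ac + (ad + bc)E.
        return D(self.real * other.real,
                 self.real * other.eps + self.eps * other.real)
    __rmul__ = __mul__

def derivative(f, x):
    return f(D(x, 1.0)).eps

print(derivative(lambda y: 3 * y * y + 2 * y, 4.0))  # f'(y) = 6y + 2, so 26.0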
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
__snake_case = 'encoder-decoder'
__snake_case = True
def __init__( self : Optional[int] , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ : List[Any] =kwargs.pop("""encoder""" )
A__ : Optional[Any] =encoder_config.pop("""model_type""" )
A__ : Any =kwargs.pop("""decoder""" )
A__ : List[Any] =decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
A__ : List[Any] =AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Any =AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Dict =True
@classmethod
def lowercase__ ( cls : int , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : List[str] ) -> PretrainedConfig:
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A__ : Tuple =True
A__ : List[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
A__ : str =copy.deepcopy(self.__dict__ )
A__ : Dict =self.encoder.to_dict()
A__ : List[Any] =self.decoder.to_dict()
A__ : Dict =self.__class__.model_type
return output
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 687 | 0 |
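Both configuration classes above follow the same composition pattern: nested sub-configs are rebuilt from dicts on load and serialized recursively by `to_dict`. A stripped-down sketch of that pattern, with hypothetical field names:

import copy

class SubConfig:
    # Hypothetical leaf config; real configs carry many more fields.
    model_type = "toy"
    def __init__(self, hidden_size=32):
        self.hidden_size = hidden_size
    def to_dict(self):
        d = copy.deepcopy(self.__dict__)
        d["model_type"] = self.model_type
        return d

class ComposedConfig:
    # Mirrors the encoder-decoder idea above: nested configs serialize recursively.
    model_type = "composed"
    def __init__(self, encoder, decoder):
        self.encoder, self.decoder = encoder, decoder
    def to_dict(self):
        return {
            "model_type": self.model_type,
            "encoder": self.encoder.to_dict(),
            "decoder": self.decoder.to_dict(),
        }

cfg = ComposedConfig(SubConfig(64), SubConfig(128))
print(cfg.to_dict()["decoder"]["hidden_size"])  # 128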
import argparse
import os
import re
import packaging.version
__snake_case : Optional[Any] = 'examples/'
__snake_case : Union[str, Any] = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__snake_case : Optional[Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
__snake_case : str = 'README.md'
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Union[str, Any], __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(__snake_case, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : int =f.read()
A__ : Union[str, Any] =REPLACE_PATTERNS[pattern]
A__ : Union[str, Any] =replace.replace("""VERSION""", __snake_case )
A__ : List[Any] =re_pattern.sub(__snake_case, __snake_case )
with open(__snake_case, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.write(__snake_case )
def __lowerCamelCase ( __snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__snake_case, __snake_case ), __snake_case, pattern="""examples""" )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Tuple=False ) -> List[Any]:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case, __snake_case, __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
A__ : Optional[Any] ="""🤗 Transformers currently provides the following architectures"""
A__ : Optional[int] ="""1. Want to contribute a new model?"""
with open(__snake_case, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : List[Any] =f.readlines()
# Find the start of the list.
A__ : Optional[int] =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A__ : List[str] =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
A__ : Optional[int] =lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""", """https://huggingface.co/docs/diffusers/model_doc""", )
index += 1
with open(__snake_case, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(__snake_case )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
with open(REPLACE_FILES["""init"""], """r""" ) as f:
A__ : Optional[Any] =f.read()
A__ : int =REPLACE_PATTERNS["""init"""][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def __lowerCamelCase ( __snake_case : Dict=False ) -> Any:
"""simple docstring"""
A__ : Dict =get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
A__ : Optional[int] =default_version.base_version
elif patch:
A__ : int =f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
A__ : Union[str, Any] =f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
A__ : Tuple =input(f"Which version are you releasing? [{default_version}]" )
if len(__snake_case ) == 0:
A__ : List[str] =default_version
print(f"Updating version to {version}." )
global_version_update(__snake_case, patch=__snake_case )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] =get_version()
A__ : Tuple =f"{current_version.major}.{current_version.minor + 1}.0.dev0"
A__ : List[str] =current_version.base_version
# Check with the user we got that right.
A__ : List[Any] =input(f"Which version are we developing now? [{dev_version}]" )
if len(__snake_case ) == 0:
A__ : Tuple =dev_version
print(f"Updating version to {version}." )
global_version_update(__snake_case )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__snake_case : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 705 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
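# --- Added illustration (not part of the original test file) -----------------
# A minimal sketch of the label shifting the tests above rely on: the last
# non-pad token (EOS or the language code) wraps around to position 0 and
# everything else shifts right by one. This mirrors the observable behaviour
# of `shift_tokens_right`; it is not the library's exact implementation.
import torch

def _shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in every row
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens
# e.g. pad=1: [[10, 11, 2, 50001]] -> [[50001, 10, 11, 2]]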
| 687 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = VideoToVideoSDPipeline
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
__snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
__snake_case = False
# No `output_type`.
__snake_case = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
A__ : int =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0 )
A__ : Any =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A__ : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
A__ : Optional[Any] =CLIPTextModel(a_ )
A__ : Union[str, Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowercase__ ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("""mps""" ):
A__ : Dict =torch.manual_seed(a_ )
else:
A__ : List[Any] =torch.Generator(device=a_ ).manual_seed(a_ )
A__ : int ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[Any] =self.get_dummy_components()
A__ : int =VideoToVideoSDPipeline(**a_ )
A__ : int =sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
A__ : Optional[Any] =self.get_dummy_inputs(a_ )
A__ : Tuple ="""np"""
A__ : Tuple =sd_pipe(**a_ ).frames
A__ : Any =frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
A__ : int =np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
A__ : List[Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Any =torch.randn((1, 10, 3, 10_24, 5_76) , generator=a_ )
A__ : Union[str, Any] =video.to("""cuda""" )
A__ : List[Any] ="""Spiderman is surfing"""
A__ : Any =pipe(a_ , video=a_ , generator=a_ , num_inference_steps=3 , output_type="""pt""" ).frames
A__ : int =np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
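# --- Added illustration (hypothetical helper, not part of the test suite) ----
# Both tests above reduce a generated frame to a small numeric fingerprint and
# compare it against a stored reference. The generic pattern, isolated:
import numpy as np

def _assert_slice_close(frame: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    # bottom-right 3x3 patch of the last channel, flattened, as the fast test does
    actual = frame[-3:, -3:, -1].flatten()
    assert np.abs(actual - expected).max() < atol, (actual, expected)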
| 706 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
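# --- Added illustration (sketch, assumed API shape) ---------------------------
# The first test above is an instance of a reusable pattern: run a pipeline,
# save it, reload it, rerun with the same seed, and require identical output.
import numpy as np
import tempfile

def _check_save_load_roundtrip(pipe_cls, pipe, run_with_fixed_seed) -> None:
    out_a = run_with_fixed_seed(pipe)
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        reloaded = pipe_cls.from_pretrained(tmpdir)
    out_b = run_with_fixed_seed(reloaded)
    assert np.abs(out_a - out_b).sum() < 1e-5, "Models don't have the same forward pass"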
| 687 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Dict = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
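# --- Added illustration (simplified sketch of the lazy-import idea) -----------
# `_LazyModule` defers the heavy imports above until an attribute is first
# touched. The real class also handles __dir__, reprs and pickling; this only
# shows the core mechanism (the same pattern recurs in the other __init__
# modules in this dump):
import importlib
from types import ModuleType

class _LazyModuleSketch(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr in self._name_to_submodule:
            submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
            return getattr(submodule, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")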
| 707 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( BaseOutput ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
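# --- Added illustration (simplified sketch) -----------------------------------
# The VectorQuantizer used above snaps each encoder latent to its nearest
# codebook vector. Core lookup only; the straight-through gradient estimator
# and commitment loss are omitted:
import torch

def _nearest_codebook_entries(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # z: (N, D) latent vectors, codebook: (K, D) embedding table
    distances = torch.cdist(z, codebook)  # (N, K) pairwise L2 distances
    indices = distances.argmin(dim=1)     # nearest code index per latent
    return codebook[indices]              # quantized latents, shape (N, D)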
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
class lowerCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
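# --- Added illustration (hypothetical helper, equivalent in spirit) -----------
# The while-loop in the bpe() method above repeatedly applies the single
# highest-ranked merge until no ranked pair remains. One merge pass, isolated:
def _merge_pair(word: tuple, pair: tuple) -> tuple:
    first, second = pair
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
            out.append(first + second)  # fuse the matched pair
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)
# _merge_pair(("h", "e", "l", "l", "o"), ("l", "l")) -> ("h", "e", "ll", "o")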
| 687 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : Any = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCamelCase ( __snake_case : str ) -> str:
"""simple docstring"""
with open(_lowercase, """rb""" ) as f:
A__ : List[str] =Image.open(_lowercase )
return im.convert("""RGB""" )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default=a__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__snake_case = field(
default=a__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__snake_case = field(default=a__ , metadata={'help': 'A folder containing the training data.'} )
__snake_case = field(default=a__ , metadata={'help': 'A folder containing the validation data.'} )
__snake_case = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__snake_case = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__snake_case = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__snake_case = field(
default=a__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(a__ )} , )
__snake_case = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__snake_case = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__snake_case = field(default=a__ , metadata={'help': 'Name or path of preprocessor config.'} )
__snake_case = field(
default=a__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__snake_case = field(
default=a__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __lowerCamelCase ( __snake_case : Tuple ) -> Dict:
"""simple docstring"""
A__ : Tuple =torch.stack([example["""pixel_values"""] for example in examples] )
A__ : Dict =torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : str =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ : Union[str, Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ : int =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""", _lowercase, _lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ : Optional[int] =training_args.get_process_log_level()
logger.setLevel(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
A__ : List[Any] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ : Optional[int] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
A__ : Tuple =load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="""image-classification""", use_auth_token=True if model_args.use_auth_token else None, )
else:
A__ : Tuple ={}
if data_args.train_dir is not None:
A__ : int =os.path.join(data_args.train_dir, """**""" )
if data_args.validation_dir is not None:
A__ : Any =os.path.join(data_args.validation_dir, """**""" )
A__ : Optional[Any] =load_dataset(
"""imagefolder""", data_files=_lowercase, cache_dir=model_args.cache_dir, task="""image-classification""", )
# If we don't have a validation split, split off a percentage of train as validation.
A__ : int =None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _lowercase ) and data_args.train_val_split > 0.0:
A__ : int =dataset["train"].train_test_split(data_args.train_val_split )
A__ : str =split["train"]
A__ : Dict =split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A__ : List[str] =dataset["train"].features["labels"].names
A__ : Union[str, Any] ={}, {}
for i, label in enumerate(_lowercase ):
A__ : int =str(_lowercase )
A__ : List[Any] =label
# Load the accuracy metric from the datasets package
A__ : Union[str, Any] =evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(__snake_case : Optional[int] ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
A__ : Any =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(_lowercase ), labelaid=_lowercase, idalabel=_lowercase, finetuning_task="""image-classification""", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A__ : List[str] =AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowercase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
A__ : List[Any] =AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A__ : str =image_processor.size["shortest_edge"]
else:
A__ : List[str] =(image_processor.size["height"], image_processor.size["width"])
A__ : str =Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A__ : int =Compose(
[
RandomResizedCrop(_lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A__ : List[str] =Compose(
[
Resize(_lowercase ),
CenterCrop(_lowercase ),
ToTensor(),
normalize,
] )
def train_transforms(__snake_case : List[str] ):
A__ : List[Any] =[
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__snake_case : List[Any] ):
A__ : Union[str, Any] =[_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
A__ : Optional[int] =(
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
A__ : Any =(
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_lowercase )
    # Initialize our trainer
A__ : Union[str, Any] =Trainer(
model=_lowercase, args=_lowercase, train_dataset=dataset["""train"""] if training_args.do_train else None, eval_dataset=dataset["""validation"""] if training_args.do_eval else None, compute_metrics=_lowercase, tokenizer=_lowercase, data_collator=_lowercase, )
# Training
if training_args.do_train:
A__ : List[str] =None
if training_args.resume_from_checkpoint is not None:
A__ : List[str] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ : Dict =last_checkpoint
A__ : List[str] =trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
trainer.log_metrics("""train""", train_result.metrics )
trainer.save_metrics("""train""", train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A__ : Dict =trainer.evaluate()
trainer.log_metrics("""eval""", _lowercase )
trainer.save_metrics("""eval""", _lowercase )
# Write model card and (optionally) push to hub
A__ : List[str] ={
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
if __name__ == "__main__":
main()
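# --- Added illustration (assumed-equivalent metric, no `evaluate` dependency) -
# The compute_metrics closure above reduces to plain argmax accuracy:
import numpy as np

def _accuracy(logits: np.ndarray, references: np.ndarray) -> dict:
    # logits: (N, num_labels), references: (N,) integer class ids
    predictions = np.argmax(logits, axis=1)
    return {"accuracy": float((predictions == references).mean())}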
| 709 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
class lowerCamelCase ( PreTrainedModel ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
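# --- Added illustration -------------------------------------------------------
# The cosine_distance helper above is a normalized dot product, so despite the
# name it returns cosine *similarities* (higher = closer). A minimal equivalent:
import torch

def _cosine_similarity_matrix(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # a: (N, D), b: (M, D) -> (N, M) values in [-1, 1]
    a = torch.nn.functional.normalize(a, dim=-1)
    b = torch.nn.functional.normalize(b, dim=-1)
    return a @ b.t()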
| 687 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__snake_case : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
__snake_case : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[int], __snake_case : Optional[int]=8 ) -> int:
"""simple docstring"""
A__ : Dict =height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A__ : Dict =width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , )
A__ : List[Any] =2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
if latents is None:
A__ : List[str] =randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A__ : int =latents.to(__lowerCAmelCase )
A__ : Dict =latents * scheduler.init_noise_sigma
return latents
def lowercase__ ( self : Any , lowerCAmelCase_ : Any=0 ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A__ : Tuple =torch.device(f"cuda:{gpu_id}" )
A__ : List[str] =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Any=0 ) -> Any:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
A__ : Optional[int] =torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A__ : Tuple =None
for cpu_offloaded_model in [self.unet, self.movq]:
A__ , A__ : Optional[int] =cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
A__ : Tuple =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : List[str] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 1_00 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self._execution_device
A__ : str =guidance_scale > 1.0
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ : Tuple =torch.cat(__lowerCAmelCase , dim=0 )
A__ : str =image_embeds.shape[0] * num_images_per_prompt
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ : Union[str, Any] =torch.cat(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A__ : Any =image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
A__ : Optional[int] =negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
A__ : Optional[int] =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
A__ : Optional[int] =self.scheduler.timesteps
A__ : List[str] =self.unet.config.in_channels
A__ , A__ : Optional[int] =downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor )
# create initial latent
A__ : Optional[Any] =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A__ : Dict =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ : Dict ={"""image_embeds""": image_embeds}
A__ : Optional[Any] =self.unet(
sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
if do_classifier_free_guidance:
A__ , A__ : List[str] =noise_pred.split(latents.shape[1] , dim=1 )
A__ , A__ : int =noise_pred.chunk(2 )
A__ , A__ : Dict =variance_pred.chunk(2 )
A__ : Optional[int] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A__ : str =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A__ , A__ : Optional[int] =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A__ : Tuple =self.scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0]
# post-processing
A__ : Any =self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A__ : Union[str, Any] =image * 0.5 + 0.5
A__ : int =image.clamp(0 , 1 )
A__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A__ : List[str] =self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
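# --- Added illustration (isolated sketch) -------------------------------------
# The classifier-free-guidance arithmetic inside the denoising loop above,
# pulled out on its own. The input is the batched (unconditional, conditional)
# prediction produced by running the UNet on torch.cat([latents] * 2):
import torch

def _apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    uncond, text = noise_pred.chunk(2)  # split the doubled batch
    return uncond + guidance_scale * (text - uncond)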
| 710 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
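# --- Added illustration (standalone arithmetic behind the two shard tests) ----
# Rows here are 8-byte int64s, so partitions = ceil(rows / floor(max_shard / 8)):
def _partitions_needed(num_rows: int, row_size_bytes: int, max_shard_size: int) -> int:
    rows_per_shard = max(1, max_shard_size // row_size_bytes)
    return -(-num_rows // rows_per_shard)  # ceiling division

assert _partitions_needed(100, 8, 16) == 50   # matches the first test above
assert _partitions_needed(100, 8, 1) == 100   # matches the last test above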
| 687 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCamelCase ( __snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
A__ : Optional[Any] =args.pruning_method
A__ : Any =args.threshold
A__ : Dict =args.model_name_or_path.rstrip("""/""" )
A__ : List[Any] =args.target_model_path
print(f"Load fine-pruned model from {model_name_or_path}" )
A__ : Any =torch.load(os.path.join(_snake_case, """pytorch_model.bin""" ) )
A__ : Union[str, Any] ={}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ : Any =tensor
print(f"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
A__ : List[Any] =tensor
print(f"Copied layer {name}" )
elif "bias" in name:
A__ : Optional[int] =tensor
print(f"Copied layer {name}" )
else:
if pruning_method == "magnitude":
A__ : List[str] =MagnitudeBinarizer.apply(inputs=_snake_case, threshold=_snake_case )
A__ : Any =tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ : Union[str, Any] =name[:-6]
A__ : str =model[f"{prefix_}mask_scores"]
A__ : Tuple =TopKBinarizer.apply(_snake_case, _snake_case )
A__ : Optional[int] =tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ : Optional[int] =name[:-6]
A__ : List[Any] =model[f"{prefix_}mask_scores"]
A__ : int =ThresholdBinarizer.apply(_snake_case, _snake_case, _snake_case )
A__ : Optional[int] =tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ : Optional[int] =name[:-6]
A__ : List[Any] =model[f"{prefix_}mask_scores"]
A__ , A__ : Optional[int] =-0.1, 1.1
A__ : Dict =torch.sigmoid(_snake_case )
A__ : Union[str, Any] =s * (r - l) + l
A__ : Any =s_bar.clamp(min=0.0, max=1.0 )
A__ : Union[str, Any] =tensor * mask
print(f"Pruned layer {name}" )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
A__ : Optional[int] =os.path.join(
os.path.dirname(_snake_case ), f"bertarized_{os.path.basename(_snake_case )}" )
if not os.path.isdir(_snake_case ):
shutil.copytree(_snake_case, _snake_case )
print(f"\nCreated folder {target_model_path}" )
torch.save(_snake_case, os.path.join(_snake_case, """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
__snake_case : Optional[int] = parser.parse_args()
main(args)
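# --- Added illustration (sketch; `threshold` semantics assumed) ---------------
# For the "magnitude" branch above, the binarizer keeps the top fraction of
# weights by absolute value. A standalone equivalent under that assumption:
import torch

def _magnitude_mask(tensor: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    k = max(1, int(keep_fraction * tensor.numel()))
    # cutoff = k-th largest |w|; keep everything at or above it
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= cutoff).to(tensor.dtype)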
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
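        # Zero-initializing the second projection makes the adapter a no-op at the start of
        # training; the low-rank path only contributes once its weights are updated, and its
        # output is added to the frozen module's output in `forward` (LoRA-style residual).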
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
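    # Generation is not bit-exact across GPU generations / cuBLAS versions, so any of the
    # completions above is accepted.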
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting with `float()`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting with `half()`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
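        # The fp32-module whitelist saved above is set to None so every layer gets quantized;
        # the original value is restored at the end of this test.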
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
| 687 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'speech_to_text'
__snake_case = ['past_key_values']
__snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Dict , lowerCAmelCase_ : Optional[int]=1_00_00 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=20_48 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[str]=2_56 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : int=60_00 , lowerCAmelCase_ : Tuple=10_24 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Union[str, Any]=(5, 5) , lowerCAmelCase_ : Union[str, Any]=10_24 , lowerCAmelCase_ : str=80 , lowerCAmelCase_ : Union[str, Any]=1 , **lowerCAmelCase_ : List[str] , ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] =vocab_size
A__ : int =d_model
A__ : Tuple =encoder_ffn_dim
A__ : List[Any] =encoder_layers
A__ : Optional[Any] =encoder_attention_heads
A__ : List[str] =decoder_ffn_dim
A__ : Optional[int] =decoder_layers
A__ : str =decoder_attention_heads
A__ : int =dropout
A__ : Optional[Any] =attention_dropout
A__ : str =activation_dropout
A__ : Optional[Any] =activation_function
A__ : Tuple =init_std
A__ : Union[str, Any] =encoder_layerdrop
A__ : Tuple =decoder_layerdrop
A__ : Union[str, Any] =use_cache
A__ : Optional[int] =encoder_layers
A__ : int =scale_embedding # scale factor will be sqrt(d_model) if True
A__ : Dict =max_source_positions
A__ : Any =max_target_positions
A__ : str =num_conv_layers
A__ : int =list(_SCREAMING_SNAKE_CASE )
A__ : str =conv_channels
A__ : Optional[int] =input_feat_per_channel
A__ : List[Any] =input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 713 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 687 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__snake_case : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def __lowerCamelCase ( __snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
A__ : Optional[int] =k.replace(__snake_case, __snake_case )
return k
def __lowerCamelCase ( __snake_case : List[str], __snake_case : List[str] ) -> Any:
"""simple docstring"""
A__ : Any =DEFAULTS.copy()
cfg_kwargs.update(__snake_case )
A__ : Tuple =PegasusConfig(**__snake_case )
A__ : Any =PegasusForConditionalGeneration(__snake_case )
A__ : Any =torch_model.model.state_dict()
A__ : int ={}
for k, v in tf_weights.items():
A__ : Any =rename_state_dict_key(__snake_case )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
A__ : Optional[int] =v.T
A__ : Union[str, Any] =torch.tensor(__snake_case, dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
A__ : str =torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
A__ : Optional[int] =mapping["""shared.weight"""]
A__ : Dict =mapping["""shared.weight"""]
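    # Biases present in the torch model but absent from the TF checkpoint are zero-initialized.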
A__ : Union[str, Any] ={k: torch.zeros_like(__snake_case ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__snake_case )
A__ : Tuple =torch_model.model.load_state_dict(__snake_case, strict=__snake_case )
A__ : List[str] =[
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def __lowerCamelCase ( __snake_case : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Any:
"""simple docstring"""
A__ : Optional[int] =tf.train.list_variables(__snake_case )
A__ : Any ={}
A__ : Optional[Any] =["""Adafactor""", """global_step"""]
for name, shape in tqdm(__snake_case, desc="""converting tf checkpoint to dict""" ):
A__ : Union[str, Any] =any(pat in name for pat in ignore_name )
if skip_key:
continue
A__ : List[str] =tf.train.load_variable(__snake_case, __snake_case )
A__ : Union[str, Any] =array
return tf_weights
def __lowerCamelCase ( __snake_case : List[str], __snake_case : List[Any] ) -> Any:
"""simple docstring"""
A__ : List[Any] =Path(__snake_case ).parent.name
A__ : Optional[int] =task_specific_params[f"summarization_{dataset}"]["""max_position_embeddings"""]
A__ : List[str] =PegasusTokenizer.from_pretrained("""sshleifer/pegasus""", model_max_length=__snake_case )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__snake_case )
# convert model
A__ : Optional[Any] =get_tf_weights_as_numpy(__snake_case )
A__ : List[str] =task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
A__ : Any =task_specific_params
A__ : List[str] =convert_pegasus(__snake_case, __snake_case )
torch_model.save_pretrained(__snake_case )
A__ : int =torch_model.state_dict()
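    # The sinusoidal position embeddings are deterministic, so they are dropped from the
    # saved state dict (they are rebuilt at load time).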
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__snake_case, Path(__snake_case ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__snake_case : Dict = parser.parse_args()
if args.save_dir is None:
__snake_case : Dict = Path(args.tf_ckpt_path).parent.name
__snake_case : Optional[Any] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 714 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
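        # Replicate each tensor along a new num_choices axis so every answer candidate is
        # scored in a single forward pass.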
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
__snake_case = ["""image_processor""", """tokenizer"""]
__snake_case = """CLIPImageProcessor"""
__snake_case = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : Tuple ) -> int:
'''simple docstring'''
A__ : Union[str, Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowercase , )
A__ : Optional[Any] =kwargs.pop("""feature_extractor""" )
A__ : int =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowercase , __lowercase )
def __call__( self : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A__ : Optional[int] =self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if images is not None:
A__ : Optional[Any] =self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None and images is not None:
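            # Upstream this attaches the image features to the text encoding, i.e.
            # `encoding["pixel_values"] = image_features.pixel_values`, before returning.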
A__ : Tuple =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def lowercase__ ( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Tuple ) -> Dict:
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def lowercase__ ( self : int , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : List[Any] =self.tokenizer.model_input_names
A__ : int =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowercase , )
return self.image_processor_class
@property
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowercase , )
return self.image_processor
| 715 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
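    # Calling this once at program start (e.g. with seed 0) makes subsequent `random`,
    # NumPy and torch sampling reproducible on CPU and on all CUDA devices.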
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
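        # The EMA keeps its own detached copy ("shadow") of every parameter; these copies
        # are what `step()` averages and `copy_to()` writes back.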
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
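            # warmup schedule 1 - (1 + step / inv_gamma) ** -power: e.g. with inv_gamma=1.0
            # and power=2/3 this gives decay ≈ 0.954 at step 100 and ≈ 0.999 around step 31600.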
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
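            # simple rational ramp: (1 + step) / (10 + step) ≈ 0.92 at step 100 and ≈ 0.99
            # at step 1000, approaching 1 as training proceeds.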
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
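                # Under ZeRO stage 3 each parameter is sharded across ranks; GatheredParameters
                # temporarily materializes the full tensor so the EMA update can read/write it.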
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
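                        # In-place update shadow -= (1 - decay) * (shadow - param) is equivalent
                        # to shadow = decay * shadow + (1 - decay) * param.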
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42
__snake_case = None
# Automatically constructed
__snake_case = "dict"
__snake_case = None
__snake_case = field(default='Translation' , init=__snake_case , repr=__snake_case )
def __call__( self : Tuple ) -> str:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = None
# Automatically constructed
__snake_case = "dict"
__snake_case = None
__snake_case = field(default='TranslationVariableLanguages' , init=__snake_case , repr=__snake_case )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
A__ : str =len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def lowercase__ ( self : int , lowerCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
A__ : Dict =set(self.languages )
if self.languages and set(A_ ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(A_ ) - lang_set ) )}) are not in valid set ({', '.join(A_ )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
A__ : List[str] =[]
for lang, text in translation_dict.items():
if isinstance(A_ , A_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
A__ : Dict =zip(*sorted(A_ ) )
return {"language": languages, "translation": translations}
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 716 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
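    # Reddit rejects the default python-requests User-Agent, so a custom string is sent
    # below to avoid an immediate 429.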
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
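        # Without `wanted_data`, the first `limit` raw post payloads are returned keyed by index.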
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : Optional[Any] = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
A__ : Dict =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[Any] ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : Optional[int] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : List[str] =generator.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Tuple =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : List[str] ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : Any =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : str =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
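# Filename convention sketch (hypothetical shard names): for
# ["wiki-train-0-1000.tfrecord", "wiki-train-1-500.tfrecord"] the regex above captures the
# trailing per-shard sample counts, so count_samples(...) returns 1500.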
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
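# Pipeline order above: shard-level shuffle -> interleaved TFRecord reads -> cardinality
# assert -> per-sample decode -> sample-level shuffle -> batch -> batch-level masking ->
# prefetch, so the mask function always sees whole batches rather than single samples.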
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
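# The boolean expression above flags padding, CLS and SEP positions so the collator never
# masks them; e.g. for attention_mask [1, 1, 1, 1, 0] with CLS first and SEP fourth the
# special-tokens mask is [True, False, False, True, True] (token layout is hypothetical).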
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__snake_case = logging.getLogger(__name__)
def __lowerCamelCase ( __snake_case : str=2, __snake_case : Optional[Any]=3, __snake_case : Union[str, Any]=16, __snake_case : Optional[int] = 10, __snake_case : List[Any] = 2 ) -> str:
"""simple docstring"""
def get_dataset(__snake_case : Optional[int] ):
A__ : Tuple =torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__lowerCAmelCase, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
A__ : Dict =get_dataset(__lowerCAmelCase )
A__ : Optional[int] =get_dataset(__lowerCAmelCase )
A__ : Any =DataLoader(__lowerCAmelCase, shuffle=__lowerCAmelCase, batch_size=__lowerCAmelCase, num_workers=4 )
A__ : str =DataLoader(__lowerCAmelCase, shuffle=__lowerCAmelCase, batch_size=__lowerCAmelCase, num_workers=4 )
return (train_dataloader, valid_dataloader)
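# The synthetic task above is y = a * x + b plus small gaussian noise, so the
# two-parameter DummyModel below only has to recover two scalars; both loaders yield
# (x, y) pairs of shape (batch_size, 1).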
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : str, __snake_case : Optional[Any], __snake_case : int, __snake_case : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[int] =[]
for epoch in range(__lowerCAmelCase ):
# Train quickly
model.train()
for batch in dataloader:
A__ : Optional[int] =batch
A__ : Optional[int] =model(__lowerCAmelCase )
A__ : Optional[int] =torch.nn.functional.mse_loss(__lowerCAmelCase, __lowerCAmelCase )
accelerator.backward(__lowerCAmelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> int:
'''simple docstring'''
super().__init__()
A__ : Any =nn.Parameter(torch.randn(1 ) )
A__ : Tuple =nn.Parameter(torch.randn(1 ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return x * self.a + self.b
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : List[Any] =DummyModel()
A__ : List[str] =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Union[str, Any] =dummy_dataloaders()
A__ : Tuple =ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
A__ : List[str] =Accelerator(project_config=lowerCamelCase__ )
A__ : Optional[Any] =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : List[Any] =DummyModel()
A__ : Dict =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Union[str, Any] =dummy_dataloaders()
# Train baseline
A__ : List[str] =Accelerator()
A__ : Optional[Any] =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
A__ : Any =os.path.join(lowerCamelCase__ , """initial""" )
accelerator.save_state(lowerCamelCase__ )
A__ , A__ : Union[str, Any] =model.a.item(), model.b.item()
A__ : str =optimizer.state_dict()
A__ : Tuple =train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ , A__ : Optional[int] =model.a.item(), model.b.item()
A__ : List[str] =optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[Any] =DummyModel()
A__ : int =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : List[Any] =dummy_dataloaders()
A__ : Union[str, Any] =Accelerator()
A__ : Optional[Any] =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
A__ , A__ : Tuple =model.a.item(), model.b.item()
A__ : Union[str, Any] =optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ : Optional[Any] =train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
A__ : Optional[Any] =os.path.join(lowerCamelCase__ , """checkpoint""" )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ , A__ : Union[str, Any] =model.a.item(), model.b.item()
A__ : Dict =optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] =DummyModel()
A__ : int =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Any =dummy_dataloaders()
A__ : Union[str, Any] =ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
A__ : str =Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
A__ : Optional[Any] =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
A__ , A__ : Optional[Any] =model.a.item(), model.b.item()
A__ : Union[str, Any] =optimizer.state_dict()
A__ : Optional[Any] =train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ , A__ : Dict =model.a.item(), model.b.item()
A__ : Optional[Any] =optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[Any] =DummyModel()
A__ : str =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict =dummy_dataloaders()
A__ : Optional[Any] =ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
A__ : Optional[Any] =Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
A__ : Union[str, Any] =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_0""" ) )
A__ , A__ : int =model.a.item(), model.b.item()
A__ : int =optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ : str =train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ , A__ : Dict =model.a.item(), model.b.item()
A__ : Dict =optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : int =torch.tensor([1, 2, 3] )
A__ : Any =torch.tensor([2, 3, 4] )
A__ : Optional[int] =DummyModel()
A__ : List[Any] =torch.optim.Adam(net.parameters() )
A__ : Union[str, Any] =Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ : Tuple =str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[int] =DummyModel()
A__ : int =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : int =torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
A__ : Any =dummy_dataloaders()
A__ : Any =ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
A__ : Optional[Any] =Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
A__ : int =accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
A__ : Union[str, Any] =scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Dict =DummyModel()
A__ : int =ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
A__ : Optional[int] =Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
A__ : Union[str, Any] =accelerator.prepare(lowerCamelCase__ )
# Save 11 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : str =["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = '/tmp/accelerate/state_checkpointing'
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__snake_case , __snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__snake_case = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__snake_case = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__snake_case = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( __a , __a ):
'''simple docstring'''
__snake_case = "bit"
__snake_case = ["preactivation", "bottleneck"]
__snake_case = ["SAME", "VALID"]
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : List[str]=64 , lowerCAmelCase_ : Optional[Any]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : int=[3, 4, 6, 3] , lowerCAmelCase_ : int="preactivation" , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : str =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : str =num_channels
A__ : Dict =embedding_size
A__ : int =hidden_sizes
A__ : Optional[int] =depths
A__ : Union[str, Any] =layer_type
A__ : Union[str, Any] =hidden_act
A__ : Tuple =global_padding
A__ : str =num_groups
A__ : Optional[int] =drop_path_rate
A__ : List[Any] =embedding_dynamic_padding
A__ : Tuple =output_stride
A__ : List[Any] =width_factor
A__ : Optional[int] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
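# Sketch: BitConfig(global_padding="same") (class name taken from the original library) is
# normalized to "SAME" by the upper()/supported_padding check above, while an unsupported
# padding string raises ValueError.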
| 719 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 0 |
'''simple docstring'''
from math import isclose, sqrt
def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> tuple[float, float, float]:
"""simple docstring"""
A__ : List[Any] =point_y / 4 / point_x
A__ : Dict =2 * normal_gradient / (1 + normal_gradient * normal_gradient)
A__ : Tuple =(1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
A__ : Optional[int] =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
A__ : Tuple =outgoing_gradient**2 + 4
A__ : List[Any] =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
A__ : Optional[int] =(point_y - outgoing_gradient * point_x) ** 2 - 100
A__ : Dict =(
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
A__ : Union[str, Any] =(
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
A__ : Union[str, Any] =x_minus if isclose(UpperCamelCase__, UpperCamelCase__ ) else x_plus
A__ : str =point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
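# Geometry sketch: the slope of the ellipse normal at (x, y) on 4x^2 + y^2 = 100 is
# y / (4x); the double-angle sine/cosine terms above reflect the incoming gradient about
# that normal, and the quadratic then picks the second intersection of the outgoing ray
# with the ellipse (the first root is the current point, excluded via isclose).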
def __lowerCamelCase ( __snake_case : float = 1.4, __snake_case : float = -9.6 ) -> int:
"""simple docstring"""
A__ : int =0
A__ : float =first_x_coord
A__ : float =first_y_coord
A__ : float =(10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
A__ : Any =next_point(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
num_reflections += 1
return num_reflections
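# With the beam entering at (0.0, 10.1) and first hitting the default point (1.4, -9.6),
# the published Project Euler 144 answer is 354, so solution() is expected to return 354.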
if __name__ == "__main__":
print(F"""{solution() = }""")
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
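# In the timm checkpoint the attention projections are fused into a single qkv matrix of
# shape (3 * hidden_size, hidden_size); the three slices above peel off the query, key and
# value weights (and biases) in that order for the Hugging Face layout.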
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = XLMTokenizer
__snake_case = False
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ : Any =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A__ : Union[str, Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
A__ : List[str] =["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase__ ) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
A__ : Tuple ="""lower newer"""
A__ : Any ="""lower newer"""
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ : List[str] =XLMTokenizer(self.vocab_file , self.merges_file )
A__ : Any ="""lower"""
A__ : List[Any] =["""low""", """er</w>"""]
A__ : List[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A__ : str =tokens + ["""<unk>"""]
A__ : Tuple =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
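# "lower" becomes ["low", "er</w>"] because the toy merges above only rank "l o", "lo w"
# and "e r</w>"; the ids [14, 15, 20] follow from the vocab ordering, with <unk> at 20.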
@slow
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
A__ : List[str] =tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase__ )
A__ : List[Any] =tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase__ )
A__ : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
A__ : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
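# step_rules format sketch (assumed from the parsing above): "1:10,0.1:20,0.01" means a
# learning-rate multiple of 1.0 before step 10, 0.1 before step 20, and 0.01 afterwards,
# all relative to the optimizer's initial learning rate.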
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
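# With the default num_cycles=0.5 the expression above reduces to
# 0.5 * (1 + cos(pi * progress)): a single half-cosine from 1.0 after warmup down to 0.0.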
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
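# e.g. with power=1.0 the decay above interpolates linearly from lr_init to lr_end; the
# final division by lr_init converts it back into the multiplier LambdaLR expects.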
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
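# Usage sketch (hypothetical optimizer; get_scheduler is the original name of the
# dispatcher above):
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)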
| 687 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( __a ):
'''simple docstring'''
__snake_case = ['input_features', 'attention_mask']
def __init__( self : Dict , lowerCAmelCase_ : Any=80 , lowerCAmelCase_ : List[Any]=1_60_00 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=10 , lowerCAmelCase_ : Optional[Any]=25 , lowerCAmelCase_ : Union[str, Any]="hamming_window" , lowerCAmelCase_ : List[str]=3_27_68.0 , lowerCAmelCase_ : List[Any]=0.97 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=False , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(feature_size=A__ , sampling_rate=A__ , padding_value=A__ , **A__ )
A__ : Optional[Any] =feature_size
A__ : List[str] =sampling_rate
A__ : Tuple =padding_value
A__ : List[Any] =hop_length
A__ : Optional[Any] =win_length
A__ : List[Any] =frame_signal_scale
A__ : Union[str, Any] =preemphasis_coeff
A__ : List[str] =mel_floor
A__ : str =normalize_means
A__ : Dict =normalize_vars
A__ : Dict =win_function
A__ : Optional[Any] =return_attention_mask
A__ : Optional[int] =win_length * sampling_rate // 10_00
A__ : Tuple =hop_length * sampling_rate // 10_00
A__ : Dict =optimal_fft_length(self.sample_size )
A__ : List[str] =(self.n_fft // 2) + 1
def lowercase__ ( self : int , lowerCAmelCase_ : Tuple ) -> np.ndarray:
'''simple docstring'''
if self.win_function == "hamming_window":
A__ : Tuple =window_function(window_length=self.sample_size , name=self.win_function , periodic=A__ )
else:
A__ : List[Any] =window_function(window_length=self.sample_size , name=self.win_function )
A__ : Optional[Any] =mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A__ : Tuple =spectrogram(
one_waveform * self.frame_signal_scale , window=A__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=A__ , preemphasis=self.preemphasis_coeff , mel_filters=A__ , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def lowercase__ ( self : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any ) -> Optional[Any]:
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
A__ : Optional[int] =x[:input_length].mean(axis=0 )
A__ : Optional[int] =np.subtract(A__ , A__ )
if self.normalize_vars:
A__ : List[str] =x[:input_length].std(axis=0 )
A__ : List[Any] =np.divide(A__ , A__ )
if input_length < x.shape[0]:
A__ : int =padding_value
# make sure array is in float32
A__ : Optional[Any] =x.astype(np.floataa )
return x
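# Per-utterance mean/variance normalization sketch with hypothetical values: for a
# feature column [1.0, 3.0] the code above computes mean 2.0 and std 1.0, yielding
# [-1.0, 1.0] before the padding value is re-applied past input_length.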
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str = None ) -> List[np.ndarray]:
'''simple docstring'''
A__ : str =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(A__ , A__ , self.padding_value ) for x, n in zip(A__ , A__ )]
def __call__( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] = False , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Any = False , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A__ : Tuple =isinstance(A__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A__ : int =is_batched_numpy or (
isinstance(A__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ : List[Any] =[np.asarray(A__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A__ , np.ndarray ):
A__ : Tuple =np.asarray(A__ , dtype=np.floataa )
elif isinstance(A__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ : str =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ : Optional[Any] =[raw_speech]
# extract fbank features
A__ : Optional[int] =[self._extract_mfsc_features(A__ ) for one_waveform in raw_speech]
# convert into correct format for padding
A__ : Optional[Any] =BatchFeature({"""input_features""": features} )
A__ : Dict =self.pad(
A__ , padding=A__ , max_length=A__ , truncation=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , **A__ , )
# make sure list is in array format
A__ : Tuple =padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , A__ ):
A__ : Union[str, Any] =[np.asarray(A__ , dtype=np.floataa ) for feature in input_features]
A__ : Optional[Any] =padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
A__ : Tuple =[np.asarray(A__ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A__ : Dict =(
np.array(A__ , dtype=np.intaa )
if self._get_padding_strategies(A__ , max_length=A__ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A__ : int =self.normalize(
padded_inputs["""input_features"""] , attention_mask=A__ )
if return_tensors is not None:
A__ : int =padded_inputs.convert_to_tensors(A__ )
return padded_inputs
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
from PIL import Image
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any ) -> Image:
"""simple docstring"""
def brightness(__snake_case : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(brightness )
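# Point-wise sketch: with level=100 a pixel value of 50 maps to 128 + 100 + (50 - 128)
# = 150, i.e. the formula simplifies to c + level applied per channel.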
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__snake_case : List[Any] = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 0 |
import math


class SelfOrganizingMap:
    '''simple docstring'''

    def get_winner(self, weights: list, sample: list) -> int:
        '''simple docstring'''
        # Squared Euclidean distance of the sample to each weight vector;
        # the closer (smaller-distance) vector wins.
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1

    def update(self, weights: list, sample: list, j: int, alpha: float) -> list:
        '''simple docstring'''
        # Move every component of the winning vector j a fraction alpha
        # toward the sample.
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """simple docstring"""
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
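
# Worked check of the update rule (assumed numbers): with alpha = 0.5, a
# weight of 0.2 pulled toward a sample value of 1 becomes
# 0.2 + 0.5 * (1 - 0.2) = 0.6, i.e. the winning vector moves a fraction
# alpha of the way toward the sample on every update.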
| 702 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
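
# Hedged sketch of what `shard` does to each array above: fold the leading
# batch axis into (num_devices, batch_per_device) so the pmap'ed pipeline
# (jit=True) runs one slice per device. Illustrative only; flax's `shard`
# applies this reshape over whole pytrees.
def _shard_array(x, num_devices: int):
    return x.reshape((num_devices, x.shape[0] // num_devices) + x.shape[1:])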
| 687 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 703 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
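
# Standalone sketch of the `attribute_map` indirection `PretrainedConfig`
# provides: reads of `hidden_size` are redirected to `d_model`, matching the
# mapping declared above. This is a demo class, not the transformers code.
class _AttrMapDemo:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int) -> None:
        self.d_model = d_model

    def __getattr__(self, name: str):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


assert _AttrMapDemo(256).hidden_size == 256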
| 687 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """simple docstring"""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """simple docstring"""
    # Replicates the behavior of torch.flatten in TF
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """simple docstring"""
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ), )
def save_attributes_to_hdf5_group(group, name, data):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64_512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}" )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode("""utf8""") if hasattr(n, """decode""") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("""utf8""") if hasattr(n, """decode""") else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d(data):
    """simple docstring"""
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
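
# Worked example of the mask inversion above, in NumPy for clarity: a padding
# mask [1, 1, 0] becomes an additive bias [0, 0, dtype_min], which pushes the
# attention probability of padded positions to ~0 after softmax.
_demo_mask = np.array([1.0, 1.0, 0.0], dtype=np.float32)
_demo_bias = (1.0 - _demo_mask) * np.finfo(np.float32).min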
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
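
# Hedged sketch of the alignment performed by
# `get_aligned_output_features_output_indices`: requested stage names map to
# their positions in `stage_names` (the real helper also validates inputs and
# fills in whichever of the two arguments was left as None).
def _align_demo(out_features: list, stage_names: list) -> list:
    return [stage_names.index(name) for name in out_features]


# e.g. _align_demo(["stage4"], ["stem", "stage1", "stage2", "stage3", "stage4"]) -> [4]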
| 687 | 0 |
def odd_even_transposition(arr: list) -> list:
    """simple docstring"""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 705 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
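
# Worked sketch of the `shift_tokens_right` behaviour the assertions above
# rely on: the language-code token that ends the labels is rotated to the
# front to serve as the decoder start token. Pad handling is omitted here,
# so this is illustrative rather than the modeling_plbart implementation.
#   labels:             [tok1, tok2, eos, __en_XX__]
#   decoder_input_ids:  [__en_XX__, tok1, tok2, eos]
def _shift_right_demo(ids: list) -> list:
    return [ids[-1]] + ids[:-1]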
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    '''simple docstring'''

    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
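
# Alternative sketch: Floyd's tortoise-and-hare detects the same loops in
# O(1) extra space, versus the O(n) `visited` list used by `__iter__` above.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False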
if __name__ == "__main__":
__snake_case : str = Node(1)
__snake_case : Optional[Any] = Node(2)
__snake_case : List[str] = Node(3)
__snake_case : List[str] = Node(4)
print(root_node.has_loop) # False
__snake_case : int = root_node.next_node
print(root_node.has_loop) # True
__snake_case : int = Node(5)
__snake_case : int = Node(6)
__snake_case : Union[str, Any] = Node(5)
__snake_case : List[Any] = Node(6)
print(root_node.has_loop) # False
__snake_case : Optional[int] = Node(1)
print(root_node.has_loop) # False
| 707 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
        A__ : Union[str, Any] =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
        A__ : Tuple =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
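
# Minimal sketch of the lookup `VectorQuantizer` performs before decoding:
# each latent vector is snapped to its nearest codebook entry. Illustrative
# NumPy version, not the diffusers implementation.
import numpy as np


def _nearest_codebook_entry(z: np.ndarray, codebook: np.ndarray) -> np.ndarray:
    # z: (n, d) latents; codebook: (k, d) embeddings.
    d2 = ((z[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)  # (n, k) squared distances
    return codebook[d2.argmin(axis=1)]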
| 687 | 0 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(""" """, """""")
    second_str = second_str.replace(""" """, """""")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
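
# Equivalent check using collections.Counter, shown for comparison with the
# defaultdict tally above:
from collections import Counter


def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip().replace(" ", "")
    second_str = second_str.lower().strip().replace(" ", "")
    return Counter(first_str) == Counter(second_str)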
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case : str = input('Enter the first string ').strip()
__snake_case : Optional[int] = input('Enter the second string ').strip()
__snake_case : int = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 708 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
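
# Quick check: for the symbol tuple ('l', 'o', 'w') the candidate BPE pairs
# are exactly {('l', 'o'), ('o', 'w')} -- the pairs `bpe` considers merging.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}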
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
| 687 | 0 |
'''simple docstring'''
def simplify(current_set: list) -> list:
    """simple docstring"""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list) -> list:
    """simple docstring"""
    if len(equations) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("""solve_simultaneous() requires lists of integers""")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
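
# Standalone sketch of the back-substitution idea `solve_simultaneous` ends
# with: given an upper-triangular augmented system (nonzero pivots assumed),
# solve from the last row upward. `back_substitute` is a hypothetical helper
# added for intuition, not part of the original module.
def back_substitute(upper: list) -> list:
    n = len(upper)
    xs = [0.0] * n
    for i in range(n - 1, -1, -1):
        xs[i] = (upper[i][-1] - sum(upper[i][j] * xs[j] for j in range(i + 1, n))) / upper[i][i]
    return xs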
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Union[str, Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 709 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
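        # Per-image loop: a concept is flagged when its cosine score exceeds the stored
        # threshold; any "special care" match raises the adjustment to 0.01, making
        # subsequent concept flagging more sensitive.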
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
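        # Batched, tensor-only variant of the check above: same scoring logic
        # without per-image Python loops.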
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
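        # Hand-crafted logits for two sequences; the indices annotated below are exactly
        # those expected to survive top_k=10 / top_p=0.6 filtering with min_tokens_to_keep=4.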
A__ : Any =tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
A__ : Optional[int] =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
A__ : int =tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
A__ : Union[str, Any] =tf_top_k_top_p_filtering(_a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A__ : Tuple =output[output != -float("""inf""" )]
A__ : List[Any] =tf.cast(
tf.where(tf.not_equal(_a , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_a , _a , rtol=1e-12 )
tf.debugging.assert_equal(_a , _a )
@require_tf
class lowerCamelCase ( unittest.TestCase , UpperCamelCase__ ):
'''simple docstring'''
if is_tf_available():
__snake_case = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
# TF-only test: tf.saved_model export
A__ : Optional[Any] =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : int =2
A__ : int =2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
super(_a , self ).__init__()
A__ : int =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Any =self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
A__ : int =[[2, 0], [1_02, 1_03]]
A__ : Any =[[1, 0], [1, 1]]
A__ : Optional[int] =DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
A__ : Union[str, Any] =tf.saved_model.load(_a ).signatures["""serving_default"""]
for batch_size in range(1 , len(_a ) + 1 ):
A__ : List[Any] ={
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
A__ : List[Any] =serving_func(**_a )["""sequences"""]
A__ : List[Any] =test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
# TF-only test: tf.saved_model export
A__ : Tuple =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : Union[str, Any] =1
A__ : str =2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
super(_a , self ).__init__()
A__ : Optional[Any] =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
A__ : Optional[Any] =[[2], [1_02, 1_03]]
A__ : List[Any] =[[1], [1, 1]]
A__ : List[Any] =DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
A__ : Optional[int] =tf.saved_model.load(_a ).signatures["""serving_default"""]
for input_row in range(len(_a ) ):
A__ : List[str] ={
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
A__ : int =serving_func(**_a )["""sequences"""]
A__ : str =test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_a )
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().__init__()
A__ : str =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_a , """spiece.model""" ) , """rb""" ).read() )
A__ : str =TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Tuple , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer.tokenize(_a )
A__ : Any =text.pad_model_inputs(
_a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A__ : Any =self.model.generate(input_ids=_a , attention_mask=_a )
return self.tokenizer.detokenize(_a )
A__ : Optional[int] =CompleteSentenceTransformer()
A__ : Dict =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
A__ : List[str] =complete_model(_a )
A__ : List[Any] =tf.keras.Model(_a , _a )
keras_model.save(_a )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
# Has PT equivalent: this test relies on random sampling
A__ : Optional[int] ={
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
A__ : Any =14
A__ : Optional[int] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : str ="""Hello, my dog is cute and"""
A__ : Optional[Any] =tokenizer(_a , return_tensors="""tf""" )
A__ : List[str] =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : Optional[Any] =6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A__ : Dict =model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A__ : int =[6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A__ : int =model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
# Has PT equivalent: ample use of framework-specific code
A__ : Any =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : Tuple ="""Hugging Face is a technology company based in New York and Paris."""
A__ : Union[str, Any] =bart_tokenizer(_a , return_tensors="""tf""" ).input_ids
A__ : Dict =TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : Optional[Any] =bart_model.generate(_a ).numpy()
class lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
return super().call(_a , **_a )
A__ : Union[str, Any] =FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : List[str] =bart_model.generate(_a , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_a , _a ) )
class lowerCamelCase ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def lowercase__ ( self : str , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return super().call(_a , **_a )
A__ : Optional[int] =FakeEncoder(bart_model.config , bart_model.model.shared )
A__ : Optional[int] =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A__ : Optional[int] =bart_model.generate(_a ).numpy()
with self.assertRaises(_a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_a , foo="""bar""" )
| 710 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 687 | 0 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Tuple="shi-labs/oneformer_demo" ) -> Dict:
"""simple docstring"""
with open(hf_hub_download(__UpperCamelCase, __UpperCamelCase, repo_type="""dataset""" ), """r""" ) as f:
A__ : List[str] =json.load(__UpperCamelCase )
A__ : int ={}
A__ : Dict =[]
A__ : Tuple =[]
for key, info in class_info.items():
A__ : Dict =info['''name''']
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__UpperCamelCase ) )
A__ : Union[str, Any] =thing_ids
A__ : Dict =class_names
return metadata
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=30 , lowerCAmelCase_ : str=4_00 , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCAmelCase_ : List[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase_ : Optional[Any]=10 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : int=2_55 , lowerCAmelCase_ : Any="shi-labs/oneformer_demo" , lowerCAmelCase_ : Dict="ade20k_panoptic.json" , lowerCAmelCase_ : int=10 , ) -> Optional[int]:
'''simple docstring'''
A__ : Any =parent
A__ : Union[str, Any] =batch_size
A__ : Any =num_channels
A__ : Optional[int] =min_resolution
A__ : Optional[int] =max_resolution
A__ : Optional[int] =do_resize
A__ : List[Any] ={'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
A__ : List[str] =do_normalize
A__ : Any =image_mean
A__ : Union[str, Any] =image_std
A__ : int =class_info_file
A__ : Optional[Any] =prepare_metadata(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] =num_text
A__ : Optional[int] =repo_path
# for the post_process_functions
A__ : Dict =2
A__ : Any =10
A__ : Tuple =10
A__ : List[Any] =3
A__ : Dict =4
A__ : int =num_labels
A__ : Union[str, Any] =do_reduce_labels
A__ : List[str] =ignore_index
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any=False ) -> List[str]:
'''simple docstring'''
if not batched:
A__ : List[str] =image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
A__ : Any =image.size
else:
A__ : int =image.shape[1], image.shape[2]
if w < h:
A__ : List[Any] =int(self.size["""shortest_edge"""] * h / w )
A__ : str =self.size['''shortest_edge''']
elif w > h:
A__ : List[Any] =self.size['''shortest_edge''']
A__ : Dict =int(self.size["""shortest_edge"""] * w / h )
else:
A__ : List[str] =self.size['''shortest_edge''']
A__ : Optional[int] =self.size['''shortest_edge''']
else:
A__ : Tuple =[]
for image in image_inputs:
A__ : Tuple =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ : List[Any] =max(UpperCamelCase__ , key=lambda lowerCAmelCase_ : item[0] )[0]
A__ : Union[str, Any] =max(UpperCamelCase__ , key=lambda lowerCAmelCase_ : item[1] )[1]
return expected_height, expected_width
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__snake_case = image_processing_class
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =OneFormerImageProcessorTester(self )
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
A__ : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """ignore_index""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """class_info_file""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """num_text""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """repo_path""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """metadata""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_reduce_labels""" ) )
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
# Initialize image_processor
A__ : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : Dict =prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A__ : int =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : Any =self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : str =self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A__ : Optional[Any] =image_processor(
UpperCamelCase__ , ["""semantic"""] * len(UpperCamelCase__ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# Initialize image_processor
A__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : Dict =prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A__ : int =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : Optional[Any] =self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : List[str] =self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A__ : Optional[Any] =image_processor(
UpperCamelCase__ , ["""semantic"""] * len(UpperCamelCase__ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
# Initialize image_processor
A__ : str =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Union[str, Any] =prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A__ : Optional[int] =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : Union[str, Any] =self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : int =self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A__ : Any =image_processor(
UpperCamelCase__ , ["""semantic"""] * len(UpperCamelCase__ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : str="np" ) -> Optional[Any]:
'''simple docstring'''
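        # Prepare pixel inputs plus optional segmentation maps (numpy arrays or PIL images),
        # with an optional instance-id -> semantic-id mapping.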
A__ : Any =self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A__ : Optional[int] =self.image_processing_tester.num_labels
A__ : Tuple =None
A__ : Optional[int] =None
A__ : List[str] =prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ )
if with_segmentation_maps:
A__ : Dict =num_labels
if is_instance_map:
A__ : Any =list(range(UpperCamelCase__ ) ) * 2
A__ : List[Any] =dict(enumerate(UpperCamelCase__ ) )
A__ : Tuple =[
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A__ : Optional[int] =[Image.fromarray(UpperCamelCase__ ) for annotation in annotations]
A__ : Optional[int] =image_processor(
UpperCamelCase__ , ["""semantic"""] * len(UpperCamelCase__ ) , UpperCamelCase__ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCamelCase__ , pad_and_return_pixel_mask=UpperCamelCase__ , )
return inputs
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
def common(lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=None ):
A__ : List[Any] =self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase__ , is_instance_map=UpperCamelCase__ , segmentation_type=UpperCamelCase__ )
A__ : Optional[int] =inputs['''mask_labels''']
A__ : Union[str, Any] =inputs['''class_labels''']
A__ : Dict =inputs['''pixel_values''']
A__ : Union[str, Any] =inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase__ )
common(is_instance_map=UpperCamelCase__ , segmentation_type="""pil""" )
common(is_instance_map=UpperCamelCase__ , segmentation_type="""pil""" )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =np.zeros((20, 50) )
A__ : Any =1
A__ : str =1
A__ : Dict =1
A__ : Tuple =binary_mask_to_rle(UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : List[Any] =self.image_processing_tester.get_fake_oneformer_outputs()
        A__ : Union[str, Any] =feature_extractor.post_process_semantic_segmentation(UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A__ : Dict =[(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A__ : Optional[int] =feature_extractor.post_process_semantic_segmentation(UpperCamelCase__ , target_sizes=UpperCamelCase__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : int =self.image_processing_tester.get_fake_oneformer_outputs()
A__ : str =image_processor.post_process_instance_segmentation(UpperCamelCase__ , threshold=0 )
self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCamelCase__ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : Tuple =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : Tuple =self.image_processing_tester.get_fake_oneformer_outputs()
A__ : int =image_processor.post_process_panoptic_segmentation(UpperCamelCase__ , threshold=0 )
self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCamelCase__ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __lowerCamelCase ( __snake_case : Any, __snake_case : Dict=7 ) -> List[str]:
"""simple docstring"""
A__ : Union[str, Any] =None
if token is not None:
A__ : Optional[Any] ={"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
# The id of a workflow (not of a workflow run)
A__ : Optional[Any] ="""636036"""
A__ : Optional[int] =f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
A__ : int =requests.get(lowerCamelCase_, headers=lowerCamelCase_ ).json()
return result["workflow_runs"]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ : int =get_daily_ci_runs(lowerCamelCase_ )
A__ : Dict =None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
A__ : Tuple =workflow_run["""id"""]
break
return workflow_run_id
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : int, __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
A__ : List[Any] =get_last_daily_ci_runs(lowerCamelCase_ )
if workflow_run_id is not None:
A__ : Any =get_artifacts_links(worflow_run_id=lowerCamelCase_, token=lowerCamelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
A__ : List[str] =artifacts_links[artifact_name]
download_artifact(
artifact_name=lowerCamelCase_, artifact_url=lowerCamelCase_, output_dir=lowerCamelCase_, token=lowerCamelCase_ )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str], __snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
A__ : int ={}
for artifact_name in artifact_names:
A__ : List[str] =os.path.join(lowerCamelCase_, f"{artifact_name}.zip" )
if os.path.isfile(lowerCamelCase_ ):
A__ : Dict ={}
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
with z.open(lowerCamelCase_ ) as f:
A__ : Dict =f.read().decode("""UTF-8""" )
return results
| 712 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
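        # Wrap a module with a low-rank (LoRA-style) adapter: a down-projection to
        # `rank` dims followed by an up-projection whose weight is zero-initialized,
        # so the wrapped output is unchanged at the start of training.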
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
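        # 4-bit loading should shrink the memory footprint by the expected ratio and
        # swap Linear weights for bitsandbytes `Paramsabit` parameters.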
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
| 687 | 0 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Optional[Any]="shi-labs/oneformer_demo" ) -> List[str]:
"""simple docstring"""
with open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) as f:
A__ : Any =json.load(__snake_case )
A__ : Dict ={}
A__ : int =[]
A__ : Optional[int] =[]
for key, info in class_info.items():
A__ : Optional[int] =info["name"]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__snake_case ) )
A__ : Any =thing_ids
A__ : Dict =class_names
return metadata
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Dict=30 , lowerCAmelCase_ : Dict=4_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase_ : int=[0.5, 0.5, 0.5] , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=2_55 , lowerCAmelCase_ : str="shi-labs/oneformer_demo" , lowerCAmelCase_ : List[str]="ade20k_panoptic.json" , lowerCAmelCase_ : Dict=10 , ) -> Tuple:
'''simple docstring'''
A__ : Any =parent
A__ : Optional[int] =batch_size
A__ : Any =num_channels
A__ : Dict =min_resolution
A__ : Any =max_resolution
A__ : Optional[Any] =do_resize
A__ : Dict ={"shortest_edge": 32, "longest_edge": 13_33} if size is None else size
A__ : List[str] =do_normalize
A__ : Optional[Any] =image_mean
A__ : List[Any] =image_std
A__ : List[Any] =class_info_file
A__ : Optional[Any] =prepare_metadata(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : str =num_text
A__ : List[str] =repo_path
# for the post_process_functions
A__ : List[str] =2
A__ : str =10
A__ : Optional[int] =10
A__ : Tuple =3
A__ : List[Any] =4
A__ : Any =num_labels
A__ : List[Any] =do_reduce_labels
A__ : Union[str, Any] =ignore_index
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase__ ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> List[str]:
'''simple docstring'''
if not batched:
A__ : int =image_inputs[0]
if isinstance(lowerCAmelCase_ , Image.Image ):
A__ : Any =image.size
else:
A__ : List[str] =image.shape[1], image.shape[2]
if w < h:
A__ : List[str] =int(self.size["""shortest_edge"""] * h / w )
A__ : Optional[int] =self.size["shortest_edge"]
elif w > h:
A__ : Dict =self.size["shortest_edge"]
A__ : str =int(self.size["""shortest_edge"""] * w / h )
else:
A__ : Optional[Any] =self.size["shortest_edge"]
A__ : Dict =self.size["shortest_edge"]
else:
A__ : Optional[int] =[]
for image in image_inputs:
A__ : List[Any] =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ : Tuple =max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[0] )[0]
A__ : Tuple =max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[1] )[1]
return expected_height, expected_width
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__snake_case = image_processing_class
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =OneFormerImageProcessorTester(self )
@property
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """ignore_index""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """class_info_file""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """num_text""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """repo_path""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """metadata""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_reduce_labels""" ) )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
# Initialize image_processor
A__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : List[str] =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
A__ : Any =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : Optional[int] =self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : Optional[Any] =self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
A__ : Optional[int] =image_processor(
lowerCAmelCase_ , ["""semantic"""] * len(lowerCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
# Initialize image_processor
A__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : Union[str, Any] =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
A__ : str =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : str =self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : int =self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
A__ : Tuple =image_processor(
lowerCAmelCase_ , ["""semantic"""] * len(lowerCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
# Initialize image_processor
A__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Tuple =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
A__ : Optional[Any] =image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
A__ : List[Any] =self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : Any =self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
A__ : List[Any] =image_processor(
lowerCAmelCase_ , ["""semantic"""] * len(lowerCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]="np" ) -> List[Any]:
'''simple docstring'''
A__ : Any =self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A__ : Dict =self.image_processing_tester.num_labels
A__ : Tuple =None
A__ : Optional[Any] =None
A__ : Dict =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ )
if with_segmentation_maps:
A__ : str =num_labels
if is_instance_map:
A__ : List[str] =list(range(lowerCAmelCase_ ) ) * 2
A__ : Tuple =dict(enumerate(lowerCAmelCase_ ) )
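                # Note on the mapping built above: dict(enumerate(list(range(num_labels)) * 2))
                # assigns two instance ids per semantic class, e.g. num_labels=3 yields
                # {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 2} (instance id -> class id).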
A__ : int =[
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A__ : int =[Image.fromarray(lowerCAmelCase_ ) for annotation in annotations]
A__ : List[str] =image_processor(
lowerCAmelCase_ , ["""semantic"""] * len(lowerCAmelCase_ ) , lowerCAmelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=lowerCAmelCase_ , pad_and_return_pixel_mask=lowerCAmelCase_ , )
return inputs
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
def common(lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Union[str, Any]=None ):
A__ : Any =self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCAmelCase_ , is_instance_map=lowerCAmelCase_ , segmentation_type=lowerCAmelCase_ )
A__ : Optional[Any] =inputs["mask_labels"]
A__ : Any =inputs["class_labels"]
A__ : List[Any] =inputs["pixel_values"]
A__ : List[str] =inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCAmelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCAmelCase_ )
common(is_instance_map=lowerCAmelCase_ , segmentation_type="""pil""" )
common(is_instance_map=lowerCAmelCase_ , segmentation_type="""pil""" )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =np.zeros((20, 50) )
A__ : Optional[Any] =1
A__ : Optional[Any] =1
A__ : Any =1
A__ : Dict =binary_mask_to_rle(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
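        # Hedged reading of the expected RLE: binary_mask_to_rle emits
        # [start_1, length_1, start_2, length_2, ...] with 1-based starts over the
        # row-major-flattened mask. The three assignments above (flattened by
        # obfuscation to bare `= 1`) plausibly filled mask[0, 20:], mask[1, :15]
        # and mask[5, :10], giving one merged run of 45 ones starting at pixel 21
        # plus a run of 10 starting at 251, i.e. rle == [21, 45, 251, 10].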
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : Optional[int] =self.image_processing_tester.get_fake_oneformer_outputs()
        A__ : List[Any] =feature_extractor.post_process_semantic_segmentation(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A__ : List[Any] =[(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A__ : Optional[Any] =feature_extractor.post_process_semantic_segmentation(lowerCAmelCase_ , target_sizes=lowerCAmelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : List[str] =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : List[str] =self.image_processing_tester.get_fake_oneformer_outputs()
A__ : Dict =image_processor.post_process_instance_segmentation(lowerCAmelCase_ , threshold=0 )
self.assertTrue(len(lowerCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ : str =self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
A__ : Tuple =self.image_processing_tester.get_fake_oneformer_outputs()
A__ : List[Any] =image_processor.post_process_panoptic_segmentation(lowerCAmelCase_ , threshold=0 )
self.assertTrue(len(lowerCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 713 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 687 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__A ):
A__ : Tuple =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
A__ : Union[str, Any] =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__A ):
A__ : Dict =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
A__ : Optional[Any] =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A__ : List[Any] =AutoTokenizer.from_pretrained(__A )
A__ : Optional[int] =FlaxBertModel.from_pretrained(__A )
A__ : Tuple =tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase_ : Dict ):
return model(**__A )
eval(**__A ).block_until_ready()
@slow
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
A__ : str =AutoTokenizer.from_pretrained(__A )
A__ : str =FlaxRobertaModel.from_pretrained(__A )
A__ : Tuple =tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase_ : Tuple ):
return model(**__A )
eval(**__A ).block_until_ready()
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
A__ : List[Any] =FlaxAutoModel.from_pretrained("""bert-base""" )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__A , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A__ : Union[str, Any] =FlaxAutoModel.from_pretrained(__A , revision="""aaaaaa""" )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__A , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
A__ : Dict =FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(__A , """Use `from_pt=True` to load this model""" ):
A__ : str =FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 714 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
        (
            A__ ,
            A__ ,
            A__ ,
            A__ ,
            A__ ,
            A__ ,
            A__ ,
            A__ ,
            A__ ,
        ) : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any]=False ) -> Optional[Any]:
"""simple docstring"""
A__ : List[str] =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : List[Any] =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Any, __snake_case : List[str], __snake_case : Optional[Any]=False ) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : List[str] =""""""
else:
A__ : int ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : List[Any] =state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
A__ : List[Any] =state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Dict =in_proj_weight[
: config.hidden_size, :
]
A__ : Dict =in_proj_bias[: config.hidden_size]
A__ : Any =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : int =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : Any =in_proj_weight[
-config.hidden_size :, :
]
A__ : int =in_proj_bias[-config.hidden_size :]
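# Worked shape example for the fused-QKV split above (e.g. hidden_size == 384,
# as in the s16 checkpoint handled later in this script): attn.qkv.weight has
# shape (3 * 384, 384); rows 0:384 become the query projection, rows 384:768
# the key projection, and the last 384 rows the value projection. The
# (3 * 384,) bias is split the same way.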
def __lowerCamelCase ( __snake_case : Dict ) -> int:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__, UpperCamelCase__ )
def __lowerCamelCase ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
A__ : int =[
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__, UpperCamelCase__ )
def __lowerCamelCase ( __snake_case : int, __snake_case : str, __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Tuple =dct.pop(UpperCamelCase__ )
A__ : List[str] =val
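# Hedged usage sketch of the pop/re-insert helper above (pre-obfuscation name
# rename_key; dct is a state_dict-like mapping):
#   sd = {"module.norm.weight": torch.ones(3)}
#   rename_key(sd, "module.norm.weight", "layernorm.weight")
#   # sd == {"layernorm.weight": tensor([1., 1., 1.])}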
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
A__ : Union[str, Any] =ViTMSNConfig()
A__ : int =1_000
A__ : Union[str, Any] ="""datasets/huggingface/label-files"""
A__ : Optional[int] ="""imagenet-1k-id2label.json"""
A__ : List[str] =json.load(open(hf_hub_download(UpperCamelCase__, UpperCamelCase__ ), """r""" ) )
A__ : Tuple ={int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A__ : Tuple =idalabel
A__ : Optional[Any] ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A__ : Dict =384
A__ : Dict =1_536
A__ : Union[str, Any] =6
elif "l16" in checkpoint_url:
A__ : int =1_024
A__ : str =4_096
A__ : List[Any] =24
A__ : List[str] =16
A__ : Optional[int] =0.1
elif "b4" in checkpoint_url:
A__ : Tuple =4
elif "l7" in checkpoint_url:
A__ : Union[str, Any] =7
A__ : List[Any] =1_024
A__ : Dict =4_096
A__ : Optional[int] =24
A__ : Union[str, Any] =16
A__ : Dict =0.1
A__ : Dict =ViTMSNModel(UpperCamelCase__ )
A__ : str =torch.hub.load_state_dict_from_url(UpperCamelCase__, map_location="""cpu""" )["""target_encoder"""]
A__ : Optional[Any] =ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCamelCase__ )
A__ : Tuple =create_rename_keys(UpperCamelCase__, base_model=UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__, UpperCamelCase__, base_model=UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
A__ : Optional[int] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : List[str] =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw )
A__ : Tuple =ViTImageProcessor(
size=config.image_size, image_mean=UpperCamelCase__, image_std=UpperCamelCase__ )
A__ : Dict =image_processor(images=UpperCamelCase__, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
A__ : List[Any] =model(**UpperCamelCase__ )
A__ : Union[str, Any] =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A__ : str =torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
A__ : Dict =torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
A__ : List[Any] =torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
A__ : Optional[Any] =torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
A__ : Union[str, Any] =torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], UpperCamelCase__, atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 715 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
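# Usage sketch: the helper above is the usual set_seed (name obfuscated); call
# it once at startup, e.g. set_seed(42), to make python, numpy and torch RNGs
# reproducible.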
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
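    # Hedged usage sketch (the class is EMAModel per the return annotation;
    # the model class name here is an assumption):
    #   ema = EMAModel.from_pretrained("path/to/ema_checkpoint", model_cls=UNet2DConditionModel)
    #   ema.copy_to(model.parameters())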
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
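    # Worked example of the schedule above (defaults: decay=0.9999, min_decay=0.0,
    # update_after_step=0, use_ema_warmup=False): at optimization_step=10,
    # step = 9 and cur_decay = (1 + 9) / (10 + 9) ≈ 0.5263, so early steps track
    # the raw weights closely while the decay approaches 0.9999 over training.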
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
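                        # in-place EMA update: shadow <- decay * shadow + (1 - decay) * param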
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __snake_case : Tuple ) -> Optional[Any]: # This function is recursive
"""simple docstring"""
A__ : Optional[int] =len(_UpperCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
A__ : Optional[Any] =array[0]
A__ : Union[str, Any] =False
A__ : str =1
A__ : int =[]
while not is_found and i < array_length:
if array[i] < pivot:
A__ : int =True
A__ : Dict =[element for element in array[i:] if element >= array[i]]
A__ : Optional[Any] =longest_subsequence(_UpperCAmelCase )
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
A__ : Union[str, Any] =temp_array
else:
i += 1
A__ : str =[element for element in array[1:] if element >= pivot]
A__ : str =[pivot, *longest_subsequence(_UpperCAmelCase )]
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
return temp_array
else:
return longest_subseq
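# Usage sketch, matching the canonical doctest for this brute-force longest
# non-decreasing subsequence algorithm:
#   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   # -> [10, 22, 33, 41, 60, 80]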
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
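# Hedged shape of the return value when wanted_data=["title", "url", "selftext"]:
#   {0: {"title": ..., "url": ..., "selftext": ...}, 1: {...}, ...}
# keyed by post index within the first `limit` children of the listing.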
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] ='ZinengTang/tvlt-base'
A__ : Optional[Any] =tempfile.mkdtemp()
def lowercase__ ( self : Optional[int] , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **__a )
def lowercase__ ( self : int , **lowerCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ : Optional[int] =self.get_image_processor()
A__ : Optional[int] =self.get_feature_extractor()
A__ : Optional[int] =TvltProcessor(image_processor=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
A__ : Optional[int] =TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __a )
self.assertIsInstance(processor.image_processor , __a )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
A__ : List[Any] =self.get_image_processor()
A__ : Union[str, Any] =self.get_feature_extractor()
A__ : Optional[Any] =TvltProcessor(image_processor=__a , feature_extractor=__a )
A__ : Dict =np.ones([1_20_00] )
A__ : Tuple =feature_extractor(__a , return_tensors="""np""" )
A__ : int =processor(audio=__a , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
A__ : Optional[int] =self.get_image_processor()
A__ : Union[str, Any] =self.get_feature_extractor()
A__ : Optional[Any] =TvltProcessor(image_processor=__a , feature_extractor=__a )
A__ : List[str] =np.ones([3, 2_24, 2_24] )
A__ : Union[str, Any] =image_processor(__a , return_tensors="""np""" )
A__ : Optional[Any] =processor(images=__a , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.get_image_processor()
A__ : Optional[Any] =self.get_feature_extractor()
A__ : Dict =TvltProcessor(image_processor=__a , feature_extractor=__a )
A__ : Tuple =np.ones([1_20_00] )
A__ : Union[str, Any] =np.ones([3, 2_24, 2_24] )
A__ : List[Any] =processor(audio=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =self.get_image_processor()
A__ : Union[str, Any] =self.get_feature_extractor()
A__ : List[Any] =TvltProcessor(image_processor=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
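# Hedged usage sketch of the processor under test (checkpoint name from setUp):
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#   # -> keys: audio_values, audio_mask, pixel_values, pixel_mask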
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
return args
def initialize_tpu(args):
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """simple docstring"""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """simple docstring"""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    """simple docstring"""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )
    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
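
# Example invocation (the script filename and GCS paths are placeholders; every flag is
# defined in parse_args above):
#
#   python train_mlm_tpu.py \
#     --pretrained_model_config roberta-base \
#     --tokenizer unigram-tokenizer-wikitext \
#     --train_dataset gs://my-bucket/train \
#     --eval_dataset gs://my-bucket/eval \
#     --output_dir ./mlm-checkpoints \
#     --bfloat16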
| 687 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
A__ : List[str] ="""A painting of a squirrel eating a burger"""
A__ : Any =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : Optional[int] =sd_pipe.prepare_inputs(_lowercase )
A__ : Optional[Any] =replicate(_lowercase )
A__ : Any =shard(_lowercase )
A__ : List[Any] =jax.random.PRNGKey(0 )
A__ : Dict =jax.random.split(_lowercase , jax.device_count() )
A__ : List[str] =sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
A__ : Optional[int] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A__ : int =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Union[str, Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : str =jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : str ="""stabilityai/stable-diffusion-2"""
A__ : int =FlaxDPMSolverMultistepScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
A__ : Any =FlaxStableDiffusionPipeline.from_pretrained(
            _lowercase , scheduler=_lowercase , revision="""bf16""" , dtype=jnp.bfloat16 , )
A__ : List[str] =scheduler_params
A__ : List[str] ="""A painting of a squirrel eating a burger"""
A__ : List[str] =jax.device_count()
A__ : Dict =num_samples * [prompt]
A__ : str =sd_pipe.prepare_inputs(_lowercase )
A__ : Dict =replicate(_lowercase )
A__ : Tuple =shard(_lowercase )
A__ : Tuple =jax.random.PRNGKey(0 )
A__ : Dict =jax.random.split(_lowercase , jax.device_count() )
A__ : Union[str, Any] =sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
A__ : Optional[Any] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A__ : Any =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : List[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : List[str] =jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
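
# Note on the data-parallel pattern used in both tests above (descriptive only):
# `replicate(...)` copies the pipeline parameters onto every local JAX device,
# `shard(...)` splits the prompt batch along a new leading device axis, and
# `jax.random.split(rng, jax.device_count())` gives each device its own PRNG key.
# With `jit=True` the sampling loop runs pmapped across devices, which is why the output
# shape begins with `jax.device_count()`.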
| 719 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
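
# Typical CI usage (the script path is an assumption): run
#   python scripts/validate_filenames.py
# from the repository root. The exit status equals the number of offending files, so any
# non-zero value fails the build.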
| 687 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_6047
RO_CODE = 25_6145
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =NllbTokenizer(A__ , keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
A__ : Optional[int] =NllbTokenizer(A__ , keep_accents=A__ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Dict =tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : int =(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Tuple =self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
A__ : List[Any] =self.tokenizer_class.from_pretrained(A__ , **A__ )
A__ : Optional[int] =tempfile.mkdtemp()
A__ : str =tokenizer_r.save_pretrained(A__ )
A__ : str =tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
A__ : str =tokenizer_r.from_pretrained(A__ )
A__ : Union[str, Any] =tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
A__ : str =tempfile.mkdtemp()
A__ : Optional[Any] =tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
A__ : Union[str, Any] =tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
A__ : Optional[int] =tokenizer_r.from_pretrained(A__ )
A__ : Union[str, Any] =tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
A__ : Dict =tempfile.mkdtemp()
A__ : int =tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
A__ : Optional[int] =tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : int =tokenizer_r.from_pretrained(A__ )
A__ : str =tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
@require_torch
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
if not self.test_seqaseq:
return
A__ : List[Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
A__ : Tuple =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
A__ : Tuple =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
A__ : Dict =tokenizer.prepare_seqaseq_batch(
src_texts=A__ , tgt_texts=A__ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
A__ : Optional[int] =tokenizer.prepare_seqaseq_batch(
A__ , tgt_texts=A__ , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
A__ : List[Any] =tokenizer.prepare_seqaseq_batch(
src_texts=A__ , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , A__ )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : int =[AddedToken("""<special>""" , lstrip=A__ )]
A__ : List[str] =self.rust_tokenizer_class.from_pretrained(
A__ , additional_special_tokens=A__ , **A__ )
A__ : Tuple =tokenizer_r.encode("""Hey this is a <special> token""" )
A__ : int =tokenizer_r.encode("""<special>""" , add_special_tokens=A__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ : Dict =self.rust_tokenizer_class.from_pretrained(
A__ , additional_special_tokens=A__ , **A__ , )
A__ : Union[str, Any] =self.tokenizer_class.from_pretrained(
A__ , additional_special_tokens=A__ , **A__ )
A__ : Tuple =tokenizer_p.encode("""Hey this is a <special> token""" )
A__ : Any =tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
    checkpoint_name = '''facebook/nllb-200-distilled-600M'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
        cls.pad_token_id = 1
        return cls
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : List[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(A__ , self.tokenizer.all_special_ids )
# fmt: off
A__ : Union[str, Any] =[RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
A__ : List[Any] =self.tokenizer.decode(A__ , skip_special_tokens=A__ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A__ )
self.assertEqual(A__ , A__ )
self.assertNotIn(self.tokenizer.eos_token , A__ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : Optional[int] =["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A__ )
A__ : int =10
A__ : List[Any] =self.tokenizer(A__ , max_length=A__ , truncation=A__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , A__ )
self.assertEqual(len(A__ ) , A__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : int =tempfile.mkdtemp()
A__ : str =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A__ )
A__ : List[str] =NllbTokenizer.from_pretrained(A__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A__ )
@require_torch
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Optional[Any] =shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(A__ , A__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
A__ : int =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A__ )
self.assertEqual(A__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , padding=A__ , truncation=A__ , max_length=3 , return_tensors="""pt""" )
A__ : str =self.tokenizer(
text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[int] =targets["""input_ids"""]
A__ : Dict =shift_tokens_right(
A__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : List[Any] =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(A__ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[25_60_47, 70, 73_56, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_60_57,
} , )
@require_torch
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
A__ : str =True
A__ : int =self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
A__ : Any =False
A__ : Any =self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
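
# A translation-preprocessing sketch distilled from the integration tests above (the
# checkpoint id and language codes are taken verbatim from the test constants):
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#     )
#     batch = tokenizer(src_texts, text_target=tgt_texts, padding=True, return_tensors="pt")
#     # by default input_ids are wrapped as [src_lang_code] + tokens + [eos]; setting
#     # tokenizer.legacy_behaviour moves the language code to the end (see the last test).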
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1E-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
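
# Example invocation (the script filename and output directory are placeholders; valid
# --model_name values on the facebookresearch/dino hub include dino_vits8, dino_vits16,
# dino_vitb8 and dino_vitb16):
#
#   python convert_dino_to_vit.py \
#     --model_name dino_vitb16 \
#     --pytorch_dump_folder_path ./dino-vitb16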
| 687 | 0 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
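
# Design note (not part of the original module): FixedPriorityQueue.enqueue is O(1), while
# both dequeues are linear in the worst case (list.pop(0) shifts elements, and
# ElementPriorityQueue uses min() + remove()). A binary heap gives O(log n) push and pop;
# a minimal sketch using the standard library:
#
#     import heapq
#     heap: list[int] = []
#     for value in (70, 10, 100):
#         heapq.heappush(heap, value)
#     assert heapq.heappop(heap) == 10  # smallest element comes out first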
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """simple docstring"""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """simple docstring"""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
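
# The `step_rules` string has the form "step:multiplier,...,default". For example, the
# illustrative value "1:10,2:0.1,0.005" keeps the LR multiplier at 10 while current_step < 1,
# at 0.1 while current_step < 2, and at 0.005 for every later step (the trailing bare number
# is the default returned once all thresholds have been passed).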
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """simple docstring"""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """simple docstring"""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """simple docstring"""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1
):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """simple docstring"""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
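
# Usage sketch (the optimizer construction is illustrative; any torch.optim.Optimizer works):
#
#     from torch.optim import AdamW
#
#     optimizer = AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "linear", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     # call lr_scheduler.step() once after every optimizer.step()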
| 687 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = StableDiffusionInstructPixaPixPipeline
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
__snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
__snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A__ : Any =PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
A__ : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A__ : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A__ : Optional[Any] =CLIPTextModel(UpperCamelCase__ )
A__ : List[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=0 ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ : Any =image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Tuple =Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
A__ : str =torch.manual_seed(UpperCamelCase__ )
else:
A__ : Union[str, Any] =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ : Any ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : str =self.get_dummy_components()
A__ : Optional[Any] =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ : int =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[Any] =self.get_dummy_inputs(UpperCamelCase__ )
A__ : Tuple =sd_pipe(**UpperCamelCase__ ).images
A__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ : Optional[int] =np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
A__ : List[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Tuple =self.get_dummy_components()
A__ : str =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ : List[str] =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : str =self.get_dummy_inputs(UpperCamelCase__ )
A__ : Tuple ="""french fries"""
A__ : Tuple =sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
A__ : Union[str, Any] =output.images
A__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ : Any =np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[Any] =self.get_dummy_components()
A__ : Tuple =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ : Any =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Tuple =self.get_dummy_inputs(UpperCamelCase__ )
A__ : Any =[inputs["""prompt"""]] * 2
        A__ : Tuple =np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
A__ : List[Any] =torch.from_numpy(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
A__ : List[str] =image / 2 + 0.5
A__ : Any =image.permute(0 , 3 , 1 , 2 )
A__ : Optional[int] =image.repeat(2 , 1 , 1 , 1 )
A__ : Union[str, Any] =sd_pipe(**UpperCamelCase__ ).images
A__ : Optional[int] =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A__ : Any =np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
A__ : int ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[Any] =self.get_dummy_components()
A__ : Tuple =EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
A__ : int =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ : Tuple =sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Any =self.get_dummy_inputs(UpperCamelCase__ )
A__ : Tuple =sd_pipe(**UpperCamelCase__ ).images
A__ : Tuple =image[0, -3:, -3:, -1]
A__ : Tuple =[round(UpperCamelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(UpperCamelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A__ : Optional[int] =np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.get_dummy_components()
A__ : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ : int =VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
A__ : str =pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Dict =pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt""" ) )[0]
A__ : List[str] =components["""vae"""]
A__ : str =self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A__ : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
A__ : Any =pipe(**UpperCamelCase__ )[0]
A__ : Union[str, Any] =np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Tuple=0 ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =torch.manual_seed(UpperCamelCase__ )
A__ : Tuple =load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
A__ : int ={
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ : Union[str, Any] =self.get_inputs()
A__ : Union[str, Any] =pipe(**UpperCamelCase__ ).images
A__ : List[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ : str =np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ )
A__ : Dict =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ : Optional[int] =self.get_inputs()
A__ : Any =pipe(**UpperCamelCase__ ).images
A__ : int =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_ddim( self ):
'''simple docstring'''
pipe =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=None )
pipe.scheduler =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs =self.get_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
expected_slice =np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_intermediate_state( self ):
'''simple docstring'''
number_of_steps =0
def callback_fn( step , timestep , latents ) -> None:
callback_fn.has_been_called =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice =latents[0, -3:, -3:, -1]
expected_slice =np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
latents =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice =latents[0, -3:, -3:, -1]
expected_slice =np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called =False
pipe =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
pipe =pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs =self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
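# callback_steps=1 invokes callback_fn on every scheduler step, so 3 inference steps must yield exactly 3 calls.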
assert callback_fn.has_been_called
assert number_of_steps == 3
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
pipe =pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
inputs =self.get_inputs()
_ =pipe(**inputs )
mem_bytes =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def test_stable_diffusion_pix2pix_pipeline_multiple_of_8( self ):
'''simple docstring'''
inputs =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
inputs["""image"""] =inputs["""image"""].resize((5_04, 5_04) )
model_id ="""timbrooks/instruct-pix2pix"""
pipe =StableDiffusionInstructPixaPixPipeline.from_pretrained(
model_id , safety_checker=None , )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
output =pipe(**inputs )
image =output.images[0]
image_slice =image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
expected_slice =np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
import argparse
import os
import jax as jnp
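# NOTE: the "jnp" alias points at the top-level jax package (used below as jnp.tree_util.tree_map), not jax.numpy.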
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = """base_with_context"""
def load_notes_encoder( weights , model ):
"""simple docstring"""
A__ : List[str] =nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
A__ : List[str] =nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight =weights[f"layers_{lyr_num}"]
A__ : Dict =nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
attention_weights =ly_weight["""attention"""]
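# t5x stores dense kernels as (in_features, out_features); the .T below transposes them into torch's (out_features, in_features) layout.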
A__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
A__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
A__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
A__ : int =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
A__ : int =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
A__ : Tuple =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
A__ : str =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
A__ : List[Any] =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ):
"""simple docstring"""
A__ : List[Any] =nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
A__ : Dict =nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight =weights[f"layers_{lyr_num}"]
attention_weights =ly_weight["""attention"""]
A__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
A__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
A__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
A__ : Any =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
A__ : List[str] =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
A__ : Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
A__ : int =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
A__ : Optional[int] =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ):
"""simple docstring"""
A__ : Optional[Any] =nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=False )
A__ : int =nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight =weights[f"layers_{lyr_num}"]
A__ : Optional[int] =nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
A__ : Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
attention_weights =ly_weight["""self_attention"""]
A__ : str =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
A__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
A__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
A__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
attention_weights =ly_weight["""MultiHeadDotProductAttention_0"""]
A__ : Dict =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
A__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
A__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
A__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
A__ : Dict =nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
A__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
A__ : List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
A__ : Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
A__ : Dict =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
A__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
A__ : str =nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ):
"""simple docstring"""
ta_checkpoint =checkpoints.load_t5x_checkpoint(args.checkpoint_path )
ta_checkpoint =jnp.tree_util.tree_map(onp.array, ta_checkpoint )
gin_overrides =[
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
gin_file =os.path.join(args.checkpoint_path, """..""", """config.gin""" )
gin_config =inference.parse_training_gin_file(gin_file, gin_overrides )
synth_model =inference.InferenceModel(args.checkpoint_path, gin_config )
scheduler =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""", variance_type="""fixed_large""" )
notes_encoder =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
continuous_encoder =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["""targets_context"""], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
decoder =T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["""targets_context"""], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
notes_encoder =load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""], notes_encoder )
continuous_encoder =load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""], continuous_encoder )
decoder =load_decoder(ta_checkpoint["""target"""]["""decoder"""], decoder )
melgan =OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
pipe =SpectrogramDiffusionPipeline(
notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
if args.save:
pipe.save_pretrained(args.output_path )
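# Illustrative invocation (script name and paths are placeholders):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 --output_path ./spectrogram_pipeline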
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
args = parser.parse_args()
main(args)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
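# _LazyModule defers the heavy torch/TF imports until one of the exported names is first accessed.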
| 687 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def get_dummy_inputs( self , seed=0 ):
'''simple docstring'''
image =floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
generator =np.random.RandomState(seed )
inputs ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def test_pipeline_default_ddim( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def test_pipeline_pndm( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.scheduler =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def test_pipeline_lms( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.scheduler =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
# warmup pass to apply optimizations
_ =pipe(**self.get_dummy_inputs() )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def test_pipeline_euler( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.scheduler =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def test_pipeline_euler_ancestral( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.scheduler =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def test_pipeline_dpm_multistep( self ):
'''simple docstring'''
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.scheduler =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs()
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
expected_slice =np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
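# Each test rebuilds its inputs via get_dummy_inputs(), so every scheduler sees the same RandomState(0) noise.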
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
@property
def gpu_provider( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options( self ):
'''simple docstring'''
options =ort.SessionOptions()
options.enable_mem_pattern =False
return options
def test_inference_default_pndm( self ):
'''simple docstring'''
init_image =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
init_image =init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=None )
prompt ="A fantasy landscape, trending on artstation"
generator =np.random.RandomState(0 )
output =pipe(
prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
images =output.images
image_slice =images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
expected_slice =np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def test_inference_k_lms( self ):
'''simple docstring'''
init_image =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
init_image =init_image.resize((7_68, 5_12) )
lms_scheduler =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
pipe =OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=None )
prompt ="A fantasy landscape, trending on artstation"
generator =np.random.RandomState(0 )
output =pipe(
prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
images =output.images
image_slice =images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
expected_slice =np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 702 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
def tearDown( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_inpaint_pipeline( self ):
'''simple docstring'''
init_image =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
mask_image =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
model_id ="""xvjiarui/stable-diffusion-2-inpainting"""
pipeline , params =FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
prompt ="""Face of a yellow cat, high resolution, sitting on a park bench"""
prng_seed =jax.random.PRNGKey(0 )
num_inference_steps =50
num_samples =jax.device_count()
prompt =num_samples * [prompt]
init_image =num_samples * [init_image]
mask_image =num_samples * [mask_image]
prompt_ids , processed_masked_images , processed_masks =pipeline.prepare_inputs(prompt , init_image , mask_image )
# shard inputs and rng
params =replicate(params )
prng_seed =jax.random.split(prng_seed , jax.device_count() )
prompt_ids =shard(prompt_ids )
processed_masked_images =shard(processed_masked_images )
processed_masks =shard(processed_masks )
output =pipeline(
prompt_ids , processed_masks , processed_masked_images , params , prng_seed , num_inference_steps , jit=True )
images =output.images.reshape(num_samples , 5_12 , 5_12 , 3 )
image_slice =images[0, 2_53:2_56, 2_53:2_56, -1]
output_slice =jnp.asarray(jax.device_get(image_slice.flatten() ) )
expected_slice =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
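# jit=True runs the pmapped pipeline, which is why prompt ids, masks, images and RNG keys are sharded across devices above.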
| 687 | 0 |
'''simple docstring'''
from math import factorial
def binomial_distribution( successes : int , trials : int , prob : float ) -> float:
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(successes , int ) or not isinstance(trials , int ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
probability =(prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
coefficient =float(factorial(trials ) )
coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
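# Worked example: P(X = 2) for 4 trials with p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375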
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 703 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = 'conditional_detr'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_00 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
backbone_config =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
backbone_model_type =backbone_config.get("""model_type""" )
config_class =CONFIG_MAPPING[backbone_model_type]
backbone_config =config_class.from_dict(backbone_config )
self.use_timm_backbone =use_timm_backbone
self.backbone_config =backbone_config
self.num_channels =num_channels
self.num_queries =num_queries
self.d_model =d_model
self.encoder_ffn_dim =encoder_ffn_dim
self.encoder_layers =encoder_layers
self.encoder_attention_heads =encoder_attention_heads
self.decoder_ffn_dim =decoder_ffn_dim
self.decoder_layers =decoder_layers
self.decoder_attention_heads =decoder_attention_heads
self.dropout =dropout
self.attention_dropout =attention_dropout
self.activation_dropout =activation_dropout
self.activation_function =activation_function
self.init_std =init_std
self.init_xavier_std =init_xavier_std
self.encoder_layerdrop =encoder_layerdrop
self.decoder_layerdrop =decoder_layerdrop
self.num_hidden_layers =encoder_layers
self.auxiliary_loss =auxiliary_loss
self.position_embedding_type =position_embedding_type
self.backbone =backbone
self.use_pretrained_backbone =use_pretrained_backbone
self.dilation =dilation
# Hungarian matcher
self.class_cost =class_cost
self.bbox_cost =bbox_cost
self.giou_cost =giou_cost
# Loss coefficients
self.mask_loss_coefficient =mask_loss_coefficient
self.dice_loss_coefficient =dice_loss_coefficient
self.cls_loss_coefficient =cls_loss_coefficient
self.bbox_loss_coefficient =bbox_loss_coefficient
self.giou_loss_coefficient =giou_loss_coefficient
self.focal_alpha =focal_alpha
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def num_attention_heads( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def hidden_size( self ) -> int:
'''simple docstring'''
return self.d_model
def to_dict( self ):
'''simple docstring'''
output =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
output["""backbone_config"""] =self.backbone_config.to_dict()
output["""model_type"""] =self.__class__.model_type
return output
class ConditionalDetrOnnxConfig ( OnnxConfig ):
'''simple docstring'''
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-5
@property
def default_onnx_opset( self ) -> int:
'''simple docstring'''
return 12
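# Minimal usage sketch (values are illustrative):
#   config = ConditionalDetrConfig(num_queries=100)
#   assert config.to_dict()["model_type"] == "conditional_detr"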
| 687 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Dict = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
"""simple docstring"""
rename_keys =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
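# e.g. with base_model=True the "vit." prefix is stripped, so the first entry maps
# "blocks.0.norm1.weight" -> "encoder.layer.0.layernorm_before.weight"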
def read_in_q_k_v( state_dict , config , base_model=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
prefix =''
else:
prefix ='vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
in_proj_bias =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] =in_proj_weight[
: config.hidden_size, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] =in_proj_bias[: config.hidden_size]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] =in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] =in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
"""simple docstring"""
ignore_keys =['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key( dct , old , new ):
"""simple docstring"""
val =dct.pop(old )
dct[new] =val
def prepare_img( ):
"""simple docstring"""
url ='http://images.cocodataset.org/val2017/000000039769.jpg'
im =Image.open(requests.get(url , stream=True ).raw )
return im
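# The COCO cats image above is the standard sanity-check input used across the conversion scripts.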
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
"""simple docstring"""
config =ViTConfig()
base_model =False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
base_model =True
config.patch_size =int(vit_name[-12:-10] )
config.image_size =int(vit_name[-9:-6] )
else:
config.num_labels =1_000
repo_id ='huggingface/label-files'
filename ='imagenet-1k-id2label.json'
id2label =json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ), """r""" ) )
id2label ={int(k ): v for k, v in id2label.items()}
config.id2label =id2label
config.label2id ={v: k for k, v in id2label.items()}
config.patch_size =int(vit_name[-6:-4] )
config.image_size =int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
config.hidden_size =192
config.intermediate_size =768
config.num_hidden_layers =12
config.num_attention_heads =3
elif vit_name[9:].startswith("""small""" ):
config.hidden_size =384
config.intermediate_size =1_536
config.num_hidden_layers =12
config.num_attention_heads =6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
config.hidden_size =768
config.intermediate_size =2_304
config.num_hidden_layers =8
config.num_attention_heads =8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
config.hidden_size =1_024
config.intermediate_size =4_096
config.num_hidden_layers =24
config.num_attention_heads =16
elif vit_name[4:].startswith("""huge""" ):
config.hidden_size =1_280
config.intermediate_size =5_120
config.num_hidden_layers =32
config.num_attention_heads =16
# load original model from timm
timm_model =timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict =timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys =create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
if vit_name[-5:] == "in21k":
model =ViTModel(config ).eval()
else:
model =ViTForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
image_processor =DeiTImageProcessor(size=config.image_size )
else:
image_processor =ViTImageProcessor(size=config.image_size )
encoding =image_processor(images=prepare_img(), return_tensors="""pt""" )
pixel_values =encoding['pixel_values']
outputs =model(pixel_values )
if base_model:
timm_pooled_output =timm_model.forward_features(pixel_values )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3 )
else:
timm_logits =timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1E-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
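# Illustrative run (script name and output path are placeholders):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base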
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
model_type = 'bit'
layer_types = ['preactivation', 'bottleneck']
supported_padding = ['SAME', 'VALID']
def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
global_padding =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
self.num_channels =num_channels
self.embedding_size =embedding_size
self.hidden_sizes =hidden_sizes
self.depths =depths
self.layer_type =layer_type
self.hidden_act =hidden_act
self.global_padding =global_padding
self.num_groups =num_groups
self.drop_path_rate =drop_path_rate
self.embedding_dynamic_padding =embedding_dynamic_padding
self.output_stride =output_stride
self.width_factor =width_factor
self.stage_names =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
self._out_features , self._out_indices =get_aligned_output_features_output_indices(
out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 687 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum ):
'''simple docstring'''
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
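# ReturnType selects the postprocessing mode: raw token ids (TENSORS), only the continuation (NEW_TEXT), or prompt + continuation (FULL_TEXT).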
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline ( Pipeline ):
'''simple docstring'''
XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *args , **kwargs ):
'''simple docstring'''
super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
prefix =None
if self.model.config.prefix is not None:
prefix =self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix =self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
preprocess_params , forward_params , _ =self._sanitize_parameters(prefix=prefix , **self._forward_params )
self._preprocess_params ={**self._preprocess_params, **preprocess_params}
self._forward_params ={**self._forward_params, **forward_params}
def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
'''simple docstring'''
preprocess_params ={}
if prefix is not None:
preprocess_params["""prefix"""] =prefix
if prefix:
prefix_inputs =self.tokenizer(
prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
generate_kwargs["""prefix_length"""] =prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, \'hole\']""" )
preprocess_params["""handle_long_generation"""] =handle_long_generation
preprocess_params.update(generate_kwargs )
forward_params =generate_kwargs
postprocess_params ={}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
return_type =ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
return_type =ReturnType.TENSORS
if return_type is not None:
postprocess_params["""return_type"""] =return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["""clean_up_tokenization_spaces"""] =clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids =self.tokenizer.encode(stop_sequence , add_special_tokens=False )
if len(stop_sequence_ids ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
generate_kwargs["""eos_token_id"""] =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _parse_and_tokenize( self , *args , **kwargs ):
'''simple docstring'''
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*args , **kwargs )
def __call__( self , text_inputs , **kwargs ):
'''simple docstring'''
return super().__call__(text_inputs , **kwargs )
def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
'''simple docstring'''
inputs =self.tokenizer(
prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
inputs["""prompt_text"""] =prompt_text
if handle_long_generation == "hole":
cur_len =inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
new_tokens =generate_kwargs["""max_new_tokens"""]
else:
new_tokens =generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
keep_length =self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"""
""" model's max length""" )
inputs["""input_ids"""] =inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
inputs["""attention_mask"""] =inputs["""attention_mask"""][:, -keep_length:]
return inputs
def _forward( self , model_inputs , **generate_kwargs ):
'''simple docstring'''
input_ids =model_inputs["""input_ids"""]
attention_mask =model_inputs.get("""attention_mask""" , None )
# Allow empty prompts
if input_ids.shape[1] == 0:
input_ids =None
attention_mask =None
in_b =1
else:
in_b =input_ids.shape[0]
prompt_text =model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
prefix_length =generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
has_max_new_tokens ="""max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
generate_kwargs["""max_length"""] =generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
has_min_new_tokens ="""min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
generated_sequence =self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
out_b =generated_sequence.shape[0]
if self.framework == "pt":
generated_sequence =generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
generated_sequence =tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
'''simple docstring'''
generated_sequence =model_outputs["""generated_sequence"""][0]
input_ids =model_outputs["""input_ids"""]
prompt_text =model_outputs["""prompt_text"""]
generated_sequence =generated_sequence.numpy().tolist()
records =[]
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
record ={"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
text =self.tokenizer.decode(
sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length =0
else:
prompt_length =len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
if return_type == ReturnType.FULL_TEXT:
all_text =prompt_text + text[prompt_length:]
else:
all_text =text[prompt_length:]
record ={"""generated_text""": all_text}
records.append(record )
return records
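# Illustrative usage through the pipeline factory:
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)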
| 705 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0003
PYTHON_CODE = 5_0002
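# For the "base" language-code set, the special ids sit at the end of the vocab: __python__ == 50002 and __en_XX__ == 50003.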
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = PLBartTokenizer
rust_tokenizer_class = None
test_rust_tokenizer = False
def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer =PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_full_base_tokenizer( self ):
'''simple docstring'''
tokenizer =PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
tokens =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
tokens =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids =tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
back_tokens =tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
end =tokenizer.vocab_size
language_tokens =[tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(language_tokens , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
code ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
input_ids =tokenizer(code ).input_ids
self.assertEqual(
tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
def test_full_multi_tokenizer( self ):
'''simple docstring'''
tokenizer =PLBartTokenizer(SAMPLE_VOCAB , language_codes="""multi""" , keep_accents=True )
tokens =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
tokens =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
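# A minimal sketch of the fairseq-style `shift_tokens_right` convention the tests
# above exercise: the last non-pad token of each row (the language code) is rotated
# to position 0 so decoding starts from it. This is an illustrative stand-in, not
# the library implementation; `pad_token_id` and the example ids are assumptions.
import torch
def _shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # position of the last non-pad token per row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens
# e.g. [[150, 242, 2, 50003]] -> [[50003, 150, 242, 2]] (language code moved up front)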
| 687 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : str = '''▁'''
__snake_case : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
__snake_case : str = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
__snake_case : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
__snake_case : Tuple = logging.get_logger(__name__)
class lowerCamelCase ( __a ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : str="<pad>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : Dict="<unk>" , lowerCAmelCase_ : int="<mask_2>" , lowerCAmelCase_ : Union[str, Any]="<mask_1>" , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=1_03 , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : List[str] , ) -> List[Any]:
'''simple docstring'''
A__ : Optional[int] =offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError(
f"additional_special_tokens should be of type list, but is"
f" {type(snake_case__ )}" )
A__ : str =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(snake_case__ ) , self.offset - 1 )
]
if len(set(snake_case__ ) ) != len(snake_case__ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
A__ : int =additional_special_tokens_extended
else:
A__ : Tuple =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
A__ : List[str] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , pad_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
A__ : int =mask_token_sent
A__ : Optional[int] =vocab_file
A__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# add special tokens to encoder dict
A__ : str ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
A__ : Union[str, Any] ={v: k for k, v in self.encoder.items()}
@property
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] ={self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.__dict__.copy()
A__ : Optional[int] =None
return state
def __setstate__( self : List[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Tuple ={}
A__ : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : Any , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
A__ : List[str] =self.sp_model.piece_to_id(snake_case__ )
return sp_id + self.offset
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
A__ : int =self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A__ : Dict =[]
A__ : Tuple =""""""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
A__ : Any =[]
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def lowercase__ ( self : int , lowerCAmelCase_ : str=False ) -> Union[str, Any]:
'''simple docstring'''
return 1
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] ) -> List[str]:
'''simple docstring'''
A__ : List[str] =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : str , lowerCAmelCase_ : List , lowerCAmelCase_ : Optional[List] = None , lowerCAmelCase_ : bool = False ) -> str:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(snake_case__ )
elif token_ids_a is None:
return self._special_token_mask(snake_case__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str=None ) -> str:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : int =os.path.join(
snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , """wb""" ) as fi:
A__ : str =self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
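# A short numeric sketch (assumed offset=103, matching the default above) of the id
# bookkeeping this tokenizer implements: low ids are reserved for pad/eos/mask
# tokens, raw SentencePiece piece ids are shifted up by `offset` on the way out,
# and `_convert_id_to_token` shifts them back down.
offset = 103          # assumption: the default offset used above
sp_piece_id = 5       # hypothetical raw SentencePiece id
token_id = sp_piece_id + offset   # what _convert_token_to_id exposes -> 108
assert token_id - offset == sp_piece_id   # what _convert_id_to_token undoes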
| 706 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __snake_case : Any, __snake_case : str, __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Dict, __snake_case : str, ) -> Optional[Any]:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Optional[int], __snake_case : Any, ) -> List[str]:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
lowerCAmelCase_, nominal_annual_percentage_rate / 365, number_of_years * 365 )
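# A short worked example (hypothetical figures) for the three helpers above, which
# are simple_interest, compound_interest and apr_interest before obfuscation:
principal, daily_rate, days = 1000.0, 0.0005, 10
assert round(principal * daily_rate * days, 10) == 5.0          # simple interest
assert round(principal * ((1 + 0.05) ** 2 - 1), 2) == 102.5     # 5% compounded twice
# the APR helper then just compounds (rate / 365) over (years * 365) periods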
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
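# A minimal self-contained sketch (hypothetical sizes) of the nearest-codebook
# lookup that `quantize` performs inside the round trip above; the real
# VectorQuantizer additionally returns a commitment loss and uses the
# straight-through estimator for gradients.
import torch
codebook = torch.randn(256, 3)                  # num_vq_embeddings x vq_embed_dim
h = torch.randn(1, 3, 32, 32)                   # encoder output after quant_conv
flat = h.permute(0, 2, 3, 1).reshape(-1, 3)     # one row per spatial position
idx = torch.cdist(flat, codebook).argmin(dim=1) # nearest codebook entry per row
quant = codebook[idx].view(1, 32, 32, 3).permute(0, 3, 1, 2)  # back to NCHW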
| 687 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : str = logging.get_logger(__name__)
__snake_case : int = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
__snake_case = 'data2vec-vision'
def __init__( self : str , lowerCAmelCase_ : Optional[int]=7_68 , lowerCAmelCase_ : Optional[int]=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : Any=30_72 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : int=1e-12 , lowerCAmelCase_ : int=2_24 , lowerCAmelCase_ : int=16 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=[3, 5, 7, 11] , lowerCAmelCase_ : str=[1, 2, 3, 6] , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=0.4 , lowerCAmelCase_ : Optional[Any]=2_56 , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Any=2_55 , **lowerCAmelCase_ : Union[str, Any] , ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A__ : Optional[int] =hidden_size
A__ : Optional[int] =num_hidden_layers
A__ : Union[str, Any] =num_attention_heads
A__ : Union[str, Any] =intermediate_size
A__ : Optional[Any] =hidden_act
A__ : Dict =hidden_dropout_prob
A__ : Any =attention_probs_dropout_prob
A__ : int =initializer_range
A__ : Tuple =layer_norm_eps
A__ : Union[str, Any] =image_size
A__ : str =patch_size
A__ : Union[str, Any] =num_channels
A__ : Union[str, Any] =use_mask_token
A__ : Union[str, Any] =use_absolute_position_embeddings
A__ : Optional[Any] =use_relative_position_bias
A__ : str =use_shared_relative_position_bias
A__ : Optional[Any] =layer_scale_init_value
A__ : Tuple =drop_path_rate
A__ : Tuple =use_mean_pooling
# decode head attributes (semantic segmentation)
A__ : int =out_indices
A__ : List[Any] =pool_scales
# auxiliary head attributes (semantic segmentation)
A__ : Dict =use_auxiliary_head
A__ : Any =auxiliary_loss_weight
A__ : List[str] =auxiliary_channels
A__ : Optional[int] =auxiliary_num_convs
A__ : Dict =auxiliary_concat_input
A__ : Any =semantic_loss_ignore_index
class lowerCamelCase ( lowercase_ ):
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return 1e-4
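# A minimal usage sketch (hypothetical) of the ONNX mapping above: an exporter
# reads `inputs` to learn that axis 0 of `pixel_values` is the dynamic batch axis,
# so a dummy tensor of (batch, num_channels, height, width) = (1, 3, 224, 224)
# matches the config defaults.
import torch
dummy_pixel_values = torch.zeros(1, 3, 224, 224)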
| 708 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
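# A quick check of the helper above (named get_pairs before obfuscation): it
# collects every adjacent symbol bigram of a word, which is what the BPE loop in
# the tokenizer below ranks against self.bpe_ranks.
assert __lowerCamelCase(("l", "o", "w", "er</w>")) == {("l", "o"), ("o", "w"), ("w", "er</w>")}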
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
| 687 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
if gpta_config_file == "":
A__ : Tuple =GPTaConfig()
else:
A__ : List[str] =GPTaConfig.from_json_file(_A )
A__ : str =GPTaModel(_A )
# Load weights from numpy
load_tf_weights_in_gpta(_A, _A, _A )
# Save pytorch-model
A__ : Optional[Any] =pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
A__ : Union[str, Any] =pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict(), _A )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__snake_case : int = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
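# Example invocation (hypothetical paths; the script and checkpoint names are
# placeholders):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./tf_ckpt/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch
# Omitting --gpt2_config_file falls back to the default config constructed above.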
| 709 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
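# A small self-contained check (illustrative values) of the helper above: after
# L2-normalising both embedding matrices, the matmul yields cosine similarities,
# so vectors pointing the same way score 1.0 regardless of magnitude.
import torch
a = torch.tensor([[3.0, 4.0]])   # hypothetical image embedding
b = torch.tensor([[0.6, 0.8]])   # same direction, different norm
sim = torch.mm(torch.nn.functional.normalize(a), torch.nn.functional.normalize(b).t())
assert torch.allclose(sim, torch.ones(1, 1))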
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 687 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Any = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : List[Any] =PegasusTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] ="""</s>"""
A__ : Any =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : int =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(UpperCamelCase__ ) , 11_03 )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : Tuple =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ : Tuple =self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ : int =(
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
A__ : Optional[Any] =rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
A__ : Dict =py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A__ : Any ="""<mask_1> To ensure a <mask_2> flow of bank resolutions."""
A__ : Tuple =[2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
A__ : Tuple =tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
A__ : Dict ="""To ensure a smooth flow of bank resolutions."""
A__ : Optional[int] =[4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
A__ : int =tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =["""This is going to be way too long.""" * 1_50, """short example"""]
A__ : Dict =["""not super long but more than 5 tokens""", """tiny"""]
A__ : Optional[Any] =self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" )
A__ : Tuple =self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[Any] ={"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Any =PegasusTokenizer(UpperCamelCase__ , offset=0 , mask_token_sent=UpperCamelCase__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowercase__ ( self : str , **lowerCAmelCase_ : int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Optional[int]:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Dict =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ : List[str] =self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ : str =(
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
A__ : str =rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
A__ : Dict =py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
A__ : Optional[int] =["""This is going to be way too long.""" * 10_00, """short example"""]
A__ : Any =["""not super long but more than 5 tokens""", """tiny"""]
A__ : Optional[int] =self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" )
A__ : List[Any] =self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : List[Any] =(
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
A__ : Optional[int] =self._large_tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(
UpperCamelCase__ , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 710 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
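# Illustrative output of the helper above (hypothetical partition contents): for
# partition_order=[1, 0] over a 4-row df split 2/2 it would yield
#   [("1_0", {"id": 2}), ("1_1", {"id": 3}), ("0_0", {"id": 0}), ("0_1", {"id": 1})]
# i.e. "<partition_id>_<row_index_within_partition>" paired with each row dict.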
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 687 | 0 |
'''simple docstring'''
__snake_case : Any = '''Input must be a string of 8 numbers plus letter'''
__snake_case : Optional[Any] = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def __lowerCamelCase ( __snake_case : List[Any] ) -> bool:
"""simple docstring"""
if not isinstance(__snake_case, __snake_case ):
A__ : List[str] =f"Expected string as input, found {type(__snake_case ).__name__}"
raise TypeError(__snake_case )
A__ : List[Any] =spanish_id.replace("""-""", """""" ).upper()
if len(__snake_case ) != 9:
raise ValueError(__snake_case )
try:
A__ : Any =int(spanish_id_clean[0:8] )
A__ : Tuple =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__snake_case ) from ex
if letter.isdigit():
raise ValueError(__snake_case )
return letter == LOOKUP_LETTERS[number % 23]
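# Worked example: for "12345678Z" the number part is 12345678, 12345678 % 23 == 14,
# and LOOKUP_LETTERS[14] == "Z", so the id validates; "12345678A" would not.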
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
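# A minimal illustration (hypothetical import path) of the lazy pattern above:
# nothing listed in `_import_structure` is imported at module load time; the first
# attribute access, e.g.
#   from <this_package> import TrajectoryTransformerModel
# makes _LazyModule import `.modeling_trajectory_transformer` on demand, so the
# package stays importable even when torch is unavailable.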
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array : list[int] , index_a : int , index_b : int , direction : int ) -> None:
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a] , array[index_b] =array[index_b] , array[index_a]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ) -> None:
    """simple docstring"""
    if length > 1:
        middle =int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ) -> None:
    """simple docstring"""
    if length > 1:
        middle =int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
__snake_case : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Tuple = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
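    # Caveat: bitonic sort only works when len(array) is a power of two. A
    # hedged pre-padding sketch (illustrative, not part of the original
    # script): pad with +inf sentinels, sort, then slice them off.
    #
    #   size = 1
    #   while size < len(values):
    #       size *= 2
    #   padded = values + [float('inf')] * (size - len(values))
    #   bitonic_sort(padded, 0, len(padded), 1)
    #   result = padded[: len(values)]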
| 712 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer ( nn.Module ):
        '''simple docstring'''
        def __init__( self : Union[str, Any] , module : nn.Module , rank : int ) -> None:
            '''simple docstring'''
            super().__init__()
            self.module =module
            self.adapter =nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
            A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=A__ )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )
        def forward( self , input , *args , **kwargs ):
            '''simple docstring'''
            return self.module(input , *args , **kwargs ) + self.adapter(input )
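        # Usage sketch (shapes illustrative, not taken from the tests below):
        #   base = nn.Linear(64, 64)
        #   wrapped = LoRALayer(base, rank=8)
        #   y = wrapped(torch.randn(2, 64))  # base(x) + adapter(x), shape (2, 64)
        # Because adapter[1] is zero-initialized, wrapped(x) == base(x) at init,
        # so inserting the adapter does not perturb the pretrained behaviour.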
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = 'Hello my name is'

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
        self.tokenizer =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
        self.model_fpaa =AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
        self.model_abit =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
        config =self.model_abit.config
        self.assertTrue(hasattr(config , """quantization_config""" ) )
        A__ : Union[str, Any] =config.to_dict()
        A__ : Any =config.to_diff_dict()
        A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
        mem_fpaa =self.model_fpaa.get_memory_footprint()
        mem_abit =self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
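        # Back-of-envelope for EXPECTED_RELATIVE_DIFFERENCE (~2.11): linear
        # weights shrink from fp16 (2 bytes) to int8 (1 byte), but embeddings,
        # norms and quantization constants keep higher precision, so the ratio
        # lands slightly above 2 rather than at exactly 2. (Interpretation of
        # the constant, not something the test asserts directly.)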
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" )
        output_sequences =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
        quantization_config =BitsAndBytesConfig()
        quantization_config.load_in_abit =True
        model_abit_from_config =AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map="""auto""" )
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" )
        output_sequences =model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" )
        self.model_fpaa =self.model_fpaa.to(torch.floataa )
        _ =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ =self.model_fpaa.to("""cpu""" )
        # Check this does not throw an error
        _ =self.model_fpaa.half()
        # Check this does not throw an error
        _ =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
        model =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=True , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
        modules =TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules =None
        # test with `t5-small`
        model =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ =model.generate(**encoded_input )
        # test with `flan-t5-small`
        model =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ =model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
        model =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ =model.generate(**encoded_input )
        # test with `flan-t5-small`
        model =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ =model.generate(**encoded_input )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
        self.pipe =pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output =self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
        model_parallel =AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map="""balanced""" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input =self.tokenizer(self.input_text , return_tensors="""pt""" )
        # Second real batch
        output_parallel =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
        model =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad =False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data =param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj =LoRALayer(module.q_proj , rank=16 )
                module.k_proj =LoRALayer(module.k_proj , rank=16 )
                module.v_proj =LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out =model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
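    # Note: this subclass only overrides the checkpoint and the expected
    # relative memory difference; the test methods themselves are inherited
    # from the pipeline test mixin above.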
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 713 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
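# Deprecation shim in action (sketch; the warning text is abbreviated):
#   fe = YolosFeatureExtractor()   # emits the FutureWarning above, then
#                                  # behaves exactly like YolosImageProcessor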
| 687 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer =MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
        tokenizer =MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens =tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids =tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens =tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ : Any =(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : str =self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
A__ : Optional[Any] =self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
A__ : int =tempfile.mkdtemp()
A__ : Dict =tokenizer_r.save_pretrained(UpperCamelCase_ )
A__ : Optional[Any] =tokenizer_p.save_pretrained(UpperCamelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
A__ : List[str] =tokenizer_r.from_pretrained(UpperCamelCase_ )
A__ : Dict =tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
A__ : Optional[Any] =tempfile.mkdtemp()
A__ : Any =tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
A__ : Union[str, Any] =tokenizer_p.save_pretrained(UpperCamelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
A__ : List[str] =tokenizer_r.from_pretrained(UpperCamelCase_ )
A__ : Optional[int] =tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
A__ : Any =tempfile.mkdtemp()
A__ : List[str] =tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : Union[str, Any] =tokenizer_r.from_pretrained(UpperCamelCase_ )
A__ : Optional[int] =tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
def lowercase__ ( cls : List[Any] ) -> Tuple:
'''simple docstring'''
        cls.tokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id =1
return cls
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
        ids =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids =[RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result =self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , UpperCamelCase_ )
A__ : List[Any] =10
A__ : Optional[int] =self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
        tmpdirname =tempfile.mkdtemp()
        original_special_tokens =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok =MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
        batch =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
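        # Worked toy example of what shift_tokens_right does for MBart (ids
        # illustrative): the trailing language code rotates to the front so the
        # decoder is primed with the target language.
        #   labels            = [w1, w2, </s>, ro_RO]
        #   decoder_input_ids = [ro_RO, w1, w2, </s>]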
@require_torch
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
        batch =self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        result =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
        batch =self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets =self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels =targets["""input_ids"""]
        batch["""decoder_input_ids"""] =shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
        inputs =self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 714 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ) -> Tuple:
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_input_lengths =use_input_lengths
        self.use_token_type_ids =use_token_type_ids
        self.use_labels =use_labels
        self.gelu_activation =gelu_activation
        self.sinusoidal_embeddings =sinusoidal_embeddings
        self.causal =causal
        self.asm =asm
        self.n_langs =n_langs
        self.vocab_size =vocab_size
        self.n_special =n_special
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.type_sequence_label_size =type_sequence_label_size
        self.initializer_range =initializer_range
        self.num_labels =num_labels
        self.num_choices =num_choices
        self.summary_type =summary_type
        self.use_proj =use_proj
        self.scope =scope
        self.bos_token_id =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths =None
        if self.use_input_lengths:
            input_lengths =(
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels =None
        token_labels =None
        is_impossible_labels =None
        if self.use_labels:
            sequence_labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels =ids_tensor([self.batch_size] , 2 ).float()
            choice_labels =ids_tensor([self.batch_size] , self.num_choices )
        config =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
        (total_loss , ) =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
        (total_loss , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
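        # Shape walk-through for the expansion above: input_ids (batch, seq)
        # -> unsqueeze(1) -> (batch, 1, seq) -> expand -> (batch, num_choices,
        # seq); the model scores each choice and logits come out as
        # (batch, num_choices), matching the assertion.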
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def lowercase__ ( self : int , pipeline_test_casse_name : int , config_class : Tuple , model_architecture : Dict , tokenizer_name : List[str] , processor_name : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def lowercase__ ( self : Optional[Any] , inputs_dict : int , model_class : int , return_labels : List[str]=False ) -> int:
        '''simple docstring'''
        inputs_dict =super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["""start_positions"""] =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["""end_positions"""] =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
    def lowercase__ ( self : Optional[Any] , batch_size : Union[str, Any] , attentions : Optional[int] , min_length : Optional[int] , max_length : Union[str, Any] , config : int , use_cache : List[Any]=False , num_beam_groups : Tuple=1 ) -> Tuple:
        '''simple docstring'''
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len =min_length + idx + 1
            src_len =min_length + idx + 1
            expected_shape =(
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def lowercase__ ( self : str , batch_size : Optional[int] , hidden_states : Dict , min_length : str , max_length : Any , config : Optional[int] , use_cache : Any=False , num_beam_groups : Union[str, Any]=1 ) -> Any:
        '''simple docstring'''
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len =min_length + idx + 1
            expected_shape =(batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__( self : Tuple , tokenizer : List[str] , speaker_embeddings : Dict=None ) -> None:
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings =speaker_embeddings
@classmethod
    def from_pretrained( cls : Optional[int] , pretrained_processor_name_or_path : Dict , speaker_embeddings_dict_path : Tuple="speaker_embeddings_path.json" , **kwargs : Dict ) -> Tuple:
        '''simple docstring'''
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path =get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist; no preloaded speaker embeddings will be used - make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
                speaker_embeddings =None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings =json.load(speaker_embeddings_json )
        else:
            speaker_embeddings =None
        tokenizer =AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self : List[Any] , save_directory : Tuple , speaker_embeddings_dict_path : List[Any]="speaker_embeddings_path.json" , speaker_embeddings_directory : str="speaker_embeddings" , push_to_hub : bool = False , **kwargs : Tuple , ) -> Any:
        '''simple docstring'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , """v2""" ) , exist_ok=True )
            embeddings_dict ={}
            embeddings_dict["""repo_or_path"""] =save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset =self._load_voice_preset(prompt_key )
                    tmp_dict ={}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , speaker_embeddings_directory , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] =os.path.join(speaker_embeddings_directory , f"{prompt_key}_{key}.npy" )
                    embeddings_dict[prompt_key] =tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , """w""" ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self : Any , voice_preset : str = None , **kwargs : int ) -> Any:
        '''simple docstring'''
        voice_preset_paths =self.speaker_embeddings[voice_preset]
        voice_preset_dict ={}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
            path =get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist; no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset} embeddings." )
            voice_preset_dict[key] =np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self : Optional[Any] , voice_preset : Optional[dict] = None ) -> Tuple:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
    def __call__( self : Any , text : int=None , voice_preset : Optional[int]=None , return_tensors : Dict="pt" , max_length : str=2_56 , add_special_tokens : Optional[Any]=False , return_attention_mask : List[Any]=True , return_token_type_ids : List[str]=False , **kwargs : List[str] , ) -> str:
        '''simple docstring'''
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset =self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith(""".npz""" ):
                    voice_preset =voice_preset + """.npz"""
                voice_preset =np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset =BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text =self.tokenizer(
            text , return_tensors=return_tensors , padding="""max_length""" , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text["""history_prompt"""] =voice_preset
        return encoded_text
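# Usage sketch for the processor above (checkpoint and preset names are
# illustrative):
#   processor = <ProcessorClass>.from_pretrained('suno/bark-small')
#   inputs = processor('Hello world', voice_preset='v2/en_speaker_6')
#   # -> tokenized text plus the semantic/coarse/fine prompt arrays that the
#   #    shape validation above checks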
| 715 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
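        # warm-up schedule: decay ramps toward 1 as (1 + step / inv_gamma) ** -power shrinks;
        # otherwise use the simpler (1 + step) / (10 + step) ramp, clamped between min_decay and decay below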
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
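        # in-place EMA update: s_param <- decay * s_param + (1 - decay) * param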
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = VQModel
__snake_case = 'sample'
@property
def lowercase__ ( self : int , lowerCAmelCase_ : Any=(32, 32) ) -> List[str]:
'''simple docstring'''
A__ : List[Any] =4
A__ : List[str] =3
A__ : Dict =floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
return {"sample": image}
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : List[str] ={
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
A__ : List[Any] =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
A__ , A__ : Tuple =VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowercase_ )
A__ : Optional[int] =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(lowercase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
A__ : Dict =torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
A__ : Tuple =image.to(lowercase_ )
with torch.no_grad():
A__ : Optional[int] =model(lowercase_ ).sample
A__ : str =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A__ : Tuple =torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
| 716 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
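    # with no field filter, return the raw post objects for the first `limit` posts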
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any]=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Dict=99 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Any=37 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]="None" , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=None , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =parent
A__ : List[Any] =batch_size
A__ : Any =seq_length
A__ : List[str] =is_training
A__ : str =use_input_mask
A__ : List[Any] =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : str =vocab_size
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : str =num_attention_heads
A__ : int =intermediate_size
A__ : int =hidden_act
A__ : List[Any] =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Dict =max_position_embeddings
A__ : List[str] =type_vocab_size
A__ : List[str] =type_sequence_label_size
A__ : List[Any] =initializer_range
A__ : List[str] =num_labels
A__ : Any =num_choices
A__ : Union[str, Any] =relative_attention
A__ : int =position_biased_input
A__ : Any =pos_att_type
A__ : List[str] =scope
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] =None
if self.use_input_mask:
A__ : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ : str =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Any =None
A__ : str =None
A__ : Any =None
if self.use_labels:
A__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any =ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[int] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase__ ( self : int , lowerCAmelCase_ : List[Any] ) -> Any:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
A__ : Dict =DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : Dict =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : List[Any] =model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
A__ : str =self.num_labels
A__ : Union[str, Any] =DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
'''simple docstring'''
A__ : Tuple =self.num_labels
A__ : Tuple =DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
A__ : Any =DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ : List[str] =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ : int =config_and_inputs
A__ : str ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =DebertaVaModelTester(self )
A__ : List[Any] =ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] =DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
A__ : List[Any] =torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ : Union[str, Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : Optional[int] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
A__ : Optional[Any] =torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" )
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
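    # shard filenames are assumed to end in -<shard_index>-<num_samples>.tfrecord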
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
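    # global batch size = per-replica batch size x number of replicas in sync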
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
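# import structure consumed by _LazyModule below; heavy submodules load on first attribute access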
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
import math
def __lowerCamelCase ( __snake_case : int, __snake_case : int ):
"""simple docstring"""
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
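    # Malus's law: I = I0 * cos^2(theta); e.g. I0 = 100 at 60 degrees gives 100 * 0.25 = 25.0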
return initial_intensity * (math.cos(math.radians(__snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 719 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__snake_case : Dict = logging.get_logger(__name__)
class lowerCamelCase ( _snake_case ):
'''simple docstring'''
__snake_case = """AutoTokenizer"""
__snake_case = ["""tokenizer"""]
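    # expected number of dimensions for each voice-preset ndarray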
__snake_case = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str]=None ) -> int:
'''simple docstring'''
super().__init__(snake_case_ )
A__ : List[str] =speaker_embeddings
@classmethod
def lowercase__ ( cls : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any="speaker_embeddings_path.json" , **lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
A__ : List[Any] =get_file_from_repo(
snake_case_ , snake_case_ , subfolder=kwargs.pop("""subfolder""" , snake_case_ ) , cache_dir=kwargs.pop("""cache_dir""" , snake_case_ ) , force_download=kwargs.pop("""force_download""" , snake_case_ ) , proxies=kwargs.pop("""proxies""" , snake_case_ ) , resume_download=kwargs.pop("""resume_download""" , snake_case_ ) , local_files_only=kwargs.pop("""local_files_only""" , snake_case_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , snake_case_ ) , revision=kwargs.pop("""revision""" , snake_case_ ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(snake_case_ , snake_case_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
A__ : Dict =None
else:
with open(snake_case_ ) as speaker_embeddings_json:
A__ : int =json.load(snake_case_ )
else:
A__ : int =None
A__ : Dict =AutoTokenizer.from_pretrained(snake_case_ , **snake_case_ )
return cls(tokenizer=snake_case_ , speaker_embeddings=snake_case_ )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]="speaker_embeddings_path.json" , lowerCAmelCase_ : Optional[Any]="speaker_embeddings" , lowerCAmelCase_ : Tuple = False , **lowerCAmelCase_ : int , ) -> List[Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case_ , snake_case_ , """v2""" ) , exist_ok=snake_case_ )
A__ : int ={}
A__ : int =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A__ : Union[str, Any] =self._load_voice_preset(snake_case_ )
A__ : List[str] ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , snake_case_ , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=snake_case_ , )
A__ : Optional[int] =os.path.join(snake_case_ , f"{prompt_key}_{key}.npy" )
A__ : Union[str, Any] =tmp_dict
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" ) as fp:
json.dump(snake_case_ , snake_case_ )
super().save_pretrained(snake_case_ , snake_case_ , **snake_case_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] = None , **lowerCAmelCase_ : Tuple ) -> Any:
'''simple docstring'''
A__ : Dict =self.speaker_embeddings[voice_preset]
A__ : Optional[Any] ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
A__ : str =get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , snake_case_ ) , cache_dir=kwargs.pop("""cache_dir""" , snake_case_ ) , force_download=kwargs.pop("""force_download""" , snake_case_ ) , proxies=kwargs.pop("""proxies""" , snake_case_ ) , resume_download=kwargs.pop("""resume_download""" , snake_case_ ) , local_files_only=kwargs.pop("""local_files_only""" , snake_case_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , snake_case_ ) , revision=kwargs.pop("""revision""" , snake_case_ ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
A__ : str =np.load(snake_case_ )
return voice_preset_dict
def lowercase__ ( self : str , lowerCAmelCase_ : List[str] = None ) -> Any:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self : Dict , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[int]="pt" , lowerCAmelCase_ : List[str]=2_56 , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]=False , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
if voice_preset is not None and not isinstance(snake_case_ , snake_case_ ):
if (
isinstance(snake_case_ , snake_case_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A__ : Any =self._load_voice_preset(snake_case_ )
else:
if isinstance(snake_case_ , snake_case_ ) and not voice_preset.endswith(""".npz""" ):
A__ : Any =voice_preset + ".npz"
A__ : List[Any] =np.load(snake_case_ )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case_ , **snake_case_ )
A__ : List[Any] =BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
A__ : Tuple =self.tokenizer(
snake_case_ , return_tensors=snake_case_ , padding="""max_length""" , max_length=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , )
if voice_preset is not None:
A__ : Union[str, Any] =voice_preset
return encoded_text
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
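        # after warm-up, lr decays polynomially from lr_init to lr_end over the remaining steps;
        # past num_training_steps it stays at lr_end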
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
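# Usage sketch (not part of the original snippet): the dispatcher above mirrors
# `diffusers.optimization.get_scheduler`; assuming that public API, a linear
# warmup + decay schedule for an optimizer can be built as follows.
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("linear", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()
    scheduler.step()  # lr ramps up over 10 steps, then decays linearly to 0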
| 687 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__snake_case : int = logging.get_logger(__name__)
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[str] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
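# Illustration (a sketch, not from the original module): with the `_LazyModule`
# set up above, heavyweight submodules are imported only on first attribute
# access, so `import transformers` itself stays cheap.
import transformers

cls = transformers.SqueezeBertForMaskedLM  # triggers the real submodule import here
print(cls.__module__)  # transformers.models.squeezebert.modeling_squeezebert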
| 687 | 0 |
'''simple docstring'''
import requests
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : str ) -> str:
"""simple docstring"""
A__ : Tuple ={"""Content-Type""": """application/json"""}
A__ : Optional[Any] =requests.post(_lowerCAmelCase, json={"""text""": message_body}, headers=_lowerCAmelCase )
if response.status_code != 200:
A__ : str =(
"""Request to slack returned an error """
f"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(_lowerCAmelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__snake_case : Optional[int] = logging.getLogger()
__snake_case : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
A__ : Any ={'''source''': '''What is love ?''', '''target''': '''life'''}
A__ : Union[str, Any] ={'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
A__ : Dict ='''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowerCamelCase , f"{split}.{field}" ) , """w""" ) as f:
f.write(__lowerCamelCase )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any = "pytorch" ) -> Dict:
'''simple docstring'''
A__ : Dict =self.get_auto_remove_tmp_dir()
A__ : str =os.path.join(__lowerCamelCase , """output""" )
A__ : Optional[Any] =os.path.join(__lowerCamelCase , """data""" )
self._create_dummy_data(data_dir=__lowerCamelCase )
A__ : Dict =f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
A__ : str =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
A__ : Optional[int] =os.path.join(__lowerCamelCase , """metrics.json""" )
with open(__lowerCamelCase ) as f:
A__ : Dict =json.load(__lowerCamelCase )
return result
@require_torch_gpu
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
A__ : List[str] =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : Union[str, Any] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 702 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
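# Sketch of the replicate/shard pattern used above (illustrative, not from the test):
# `shard` splits the leading batch axis across local devices, while `replicate`
# copies a pytree (e.g. model params) onto every device.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

batch = jnp.zeros((jax.local_device_count() * 2, 77), dtype=jnp.intaa if False else jnp.int32)
sharded = shard(batch)          # shape: (local_device_count, 2, 77)
params = {"w": jnp.ones((3,))}
replicated = replicate(params)  # each leaf gains a leading device axis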
| 687 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = BioGptTokenizer
__snake_case = False
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ : Optional[int] =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A__ : str =dict(zip(_lowercase , range(len(_lowercase ) ) ) )
A__ : Union[str, Any] =["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowercase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowercase ) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : int ="""lower newer"""
A__ : int ="""lower newer"""
return input_text, output_text
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Tuple =BioGptTokenizer(self.vocab_file , self.merges_file )
A__ : int ="""lower"""
A__ : Tuple =["""low""", """er</w>"""]
A__ : Optional[Any] =tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
A__ : Union[str, Any] =tokens + ["""<unk>"""]
A__ : List[Any] =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
@slow
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : int =BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Tuple =tokenizer.encode("""sequence builders""" , add_special_tokens=_lowercase )
A__ : List[Any] =tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowercase )
A__ : Dict =tokenizer.build_inputs_with_special_tokens(_lowercase )
A__ : List[str] =tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
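# Quick usage sketch (assumes network access to the "microsoft/biogpt" checkpoint;
# not part of the original test):
from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
ids = tokenizer.encode("""sequence builders""" )  # special token id 2 is prepended, as asserted above
print(tokenizer.convert_ids_to_tokens(ids) )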
| 703 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
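# Instantiation sketch (assumes the public `transformers.ConditionalDetrConfig`
# that this snippet mirrors; not part of the original file):
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(num_queries=100, d_model=256)
print(config.hidden_size)           # 256, resolved through the attribute_map alias
print(config.num_attention_heads)   # aliases encoder_attention_heads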
| 687 | 0 |
'''simple docstring'''
import sys
__snake_case : Optional[int] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __lowerCamelCase ( __snake_case : List[str] = N ) -> List[Any]:
"""simple docstring"""
A__ : Tuple =-sys.maxsize - 1
for i in range(len(__SCREAMING_SNAKE_CASE ) - 12 ):
A__ : List[str] =1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
A__ : str =product
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
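# Usage sketch (assumes the public `transformers.BitConfig` this mirrors;
# not part of the original file):
from transformers import BitConfig

config = BitConfig(layer_type="""bottleneck""" , out_features=["""stage2""", """stage4"""] )
print(config.out_features)  # ['stage2', 'stage4']
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']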
| 687 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
@slow
@require_torch
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
A__ : str =BertTokenizer.from_pretrained("""bert-base-uncased""" )
A__ : Tuple =bertabert.config.encoder.vocab_size
A__ : int =tokenizer.sep_token_id
A__ : Dict =tokenizer.cls_token_id
A__ : List[str] =1_28
A__ : str =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
A__ : int =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
A__ : Union[str, Any] =train_dataset.select(range(32 ) )
A__ : int =val_dataset.select(range(16 ) )
A__ : int =4
def _map_to_encoder_decoder_inputs(lowerCAmelCase_ : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
A__ : Tuple =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=A_ , max_length=5_12 )
A__ : Dict =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=A_ , max_length=1_28 )
A__ : Tuple =inputs.input_ids
A__ : int =inputs.attention_mask
A__ : str =outputs.input_ids
A__ : Dict =outputs.input_ids.copy()
A__ : Optional[Any] =[
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
A__ : List[str] =outputs.attention_mask
assert all(len(A_ ) == 5_12 for x in inputs.input_ids )
assert all(len(A_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase_ : str ):
A__ : Optional[int] =pred.label_ids
A__ : List[str] =pred.predictions
# all unnecessary tokens are removed
A__ : Union[str, Any] =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
A__ : Optional[Any] =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
A__ : List[Any] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
A__ : List[str] =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
A__ : str =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
A__ : List[Any] =self.get_auto_remove_tmp_dir()
A__ : Optional[int] =SeqaSeqTrainingArguments(
output_dir=A_ , per_device_train_batch_size=A_ , per_device_eval_batch_size=A_ , predict_with_generate=A_ , evaluation_strategy="""steps""" , do_train=A_ , do_eval=A_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A__ : Tuple =SeqaSeqTrainer(
model=A_ , args=A_ , compute_metrics=_compute_metrics , train_dataset=A_ , eval_dataset=A_ , tokenizer=A_ , )
# start training
trainer.train()
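# The -100 masking in `_map_to_encoder_decoder_inputs` relies on PyTorch's
# CrossEntropyLoss default ignore_index; a minimal illustration (not from the test):
import torch

labels = torch.tensor([[5, 6, 0, 0]])  # 0 = pad_token_id
labels[labels == 0] = -100             # these positions now contribute no loss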
| 705 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
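# Translation-input sketch (assumes network access to the checkpoint exercised by
# the test above; not part of the original):
from transformers import PLBartTokenizer

tok = PLBartTokenizer.from_pretrained(
    """uclanlp/plbart-python-en_XX""" , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
batch = tok(["""def f():NEW_LINE_INDENTpass"""] , text_target=["""Does nothing."""] , return_tensors="""pt""" )
print(batch.input_ids[0, -2:].tolist())  # ends with [2, 50002], i.e. EOS + __python__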
| 687 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : List[Any] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class lowerCamelCase ( __lowercase ):
'''simple docstring'''
__snake_case = 'distilbert'
__snake_case = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : Tuple , lowerCAmelCase_ : Any=3_05_22 , lowerCAmelCase_ : List[str]=5_12 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : Optional[Any]=7_68 , lowerCAmelCase_ : Optional[int]=4 * 7_68 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : List[str]=0.2 , lowerCAmelCase_ : List[Any]=0 , **lowerCAmelCase_ : List[Any] , ) -> Tuple:
'''simple docstring'''
A__ : Any =vocab_size
A__ : List[str] =max_position_embeddings
A__ : Tuple =sinusoidal_pos_embds
A__ : Union[str, Any] =n_layers
A__ : Tuple =n_heads
A__ : int =dim
A__ : Union[str, Any] =hidden_dim
A__ : List[str] =dropout
A__ : Dict =attention_dropout
A__ : Any =activation
A__ : Union[str, Any] =initializer_range
A__ : Optional[Any] =qa_dropout
A__ : Tuple =seq_classif_dropout
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ )
class lowerCamelCase ( __lowercase ):
'''simple docstring'''
@property
def lowercase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A__ : Tuple ={0: "batch", 1: "choice", 2: "sequence"}
else:
A__ : Dict ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
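# ONNX-export inputs sketch (uses the configuration classes defined above under
# their public transformers names; treat the import path as an assumption):
from transformers import DistilBertConfig
from transformers.models.distilbert.configuration_distilbert import DistilBertOnnxConfig

config = DistilBertConfig()
onnx_config = DistilBertOnnxConfig(config, task="""sequence-classification""" )
print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes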
| 706 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[Any]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : Any=[3, 4, 6, 3] , lowerCAmelCase_ : str="preactivation" , lowerCAmelCase_ : Union[str, Any]="relu" , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : List[str] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : Any =num_channels
A__ : Optional[Any] =embedding_size
A__ : int =hidden_sizes
A__ : Union[str, Any] =depths
A__ : Tuple =layer_type
A__ : List[str] =hidden_act
A__ : List[str] =global_padding
A__ : Dict =num_groups
A__ : List[Any] =drop_path_rate
A__ : Dict =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : str =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
| 707 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
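# Round-trip sketch with a tiny randomly initialised model (assumes the public
# `diffusers.VQModel` that this class mirrors; not part of the original file):
import torch
from diffusers import VQModel

model = VQModel(block_out_channels=(32,), latent_channels=3)
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents
recon = model.decode(latents).sample  # quantised inside decode(), then decoded
print(recon.shape)  # torch.Size([1, 3, 32, 32])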
| 687 | 0 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
while b:
        A__ , A__ : int =b, a % b
return a
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(__snake_case, a % b )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}" )
if __name__ == "__main__":
main()
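# Property-check sketch (uses the conventional names `euclidean_gcd` and
# `euclidean_gcd_recursive` that main() above refers to):
import math
import random

for _ in range(100):
    a, b = random.randrange(1, 10_000), random.randrange(1, 10_000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)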
| 708 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
        A__ : Union[str, Any] =lowerCAmelCase_.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
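# Hedged self-contained sketch of the merge loop in `bpe` above: repeatedly fuse
# the lowest-ranked adjacent symbol pair until no ranked pair remains. The toy
# merge ranks below are assumptions, not values from a real merges file.
if __name__ == "__main__":
    _ranks = {("l", "o"): 0, ("lo", "w"): 1}
    _word = ["l", "o", "w"]
    while True:
        _scored = [(_ranks.get(p, float("inf")), i) for i, p in enumerate(zip(_word, _word[1:]))]
        _rank, _i = min(_scored, default=(float("inf"), -1))
        if _rank == float("inf"):
            break
        _word = _word[:_i] + [_word[_i] + _word[_i + 1]] + _word[_i + 2 :]
    assert _word == ["low"]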
| 687 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
__snake_case : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
    A__ : List[str] =os.path.dirname(os.path.realpath(__file__ ) )
A__ : Union[str, Any] =os.path.join(lowerCAmelCase__, """words.txt""" )
A__ : str =''
with open(lowerCAmelCase__ ) as f:
A__ : Optional[Any] =f.readline()
A__ : Any =[word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
A__ : Dict =[
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
    return len(words )
if __name__ == "__main__":
print(solution())
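    # Hedged sanity check of the letter-scoring rule used above:
    # ord("S") + ord("K") + ord("Y") - 3 * 64 == 55, a triangular number,
    # so "SKY" counts as a triangle word.
    assert sum(ord(c) - 64 for c in "SKY") == 55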
| 709 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
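# Hedged shape sketch for the cosine_distance helper defined at the top of this
# file: dot products of L2-normalized rows give a (num_images, num_concepts)
# similarity matrix bounded by [-1, 1]. The 768-dim embedding size is an assumption.
if __name__ == "__main__":
    _image_embeds = torch.randn(2, 768)
    _text_embeds = torch.randn(3, 768)
    _sim = torch.mm(nn.functional.normalize(_image_embeds), nn.functional.normalize(_text_embeds).t())
    assert _sim.shape == (2, 3) and float(_sim.abs().max()) <= 1.0 + 1e-5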
| 687 | 0 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : Dict ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> bool:
"""simple docstring"""
A__ : Optional[Any] =0
A__ : int =number
while duplicate > 0:
A__ , A__ : List[str] =divmod(__snake_case, 10 )
fact_sum += factorial(__snake_case )
return fact_sum == number
if __name__ == "__main__":
print('Program to check whether a number is a Krisnamurthy Number or not.')
__snake_case : Optional[int] = int(input('Enter number: ').strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
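    # Hedged self-contained reference (avoids the obfuscated names above):
    # 1, 2, 145 and 40585 are the known base-10 Krishnamurthy numbers.
    from math import factorial as _fact

    assert all(n == sum(_fact(int(d)) for d in str(n)) for n in (1, 2, 145, 40585))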
| 710 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
        A__ : Tuple =lambda x : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
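# Hedged sketch of the worker/shard assignment the sharding test above expects:
# worker `i` of `n` workers takes partitions i, i + n, i + 2n, ... This mapping
# is inferred from the expected [0, 2] / [1, 3] splits, not from library internals.
def _assumed_shard_order(worker_id: int, num_workers: int, num_partitions: int) -> list:
    return list(range(worker_id, num_partitions, num_workers))


assert _assumed_shard_order(0, 2, 4) == [0, 2]
assert _assumed_shard_order(1, 2, 4) == [1, 3]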
| 687 | 0 |
'''simple docstring'''
__snake_case : Any = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the one below.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Optional[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
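# Hedged sketch of what the _LazyModule swap above provides (PEP 562): module
# attributes trigger the real submodule import only on first access. This
# fallback is illustrative; _LazyModule already implements the behavior.
def __getattr__(name):
    import importlib

    for _submodule, _names in _import_structure.items():
        if name in _names:
            return getattr(importlib.import_module("." + _submodule, __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")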
| 687 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case : Dict = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class lowerCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , **lowerCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
self.check_model_type(lowerCAmelCase_ )
def lowercase__ ( self : str , **lowerCAmelCase_ : str ) -> Dict:
'''simple docstring'''
A__ : Any ={}
A__ : List[str] ={}
A__ : int ={}
# preprocess args
if "points_per_batch" in kwargs:
A__ : List[Any] =kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A__ : Any =kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A__ : Optional[Any] =kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A__ : Union[str, Any] =kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A__ : str =kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A__ : List[str] =kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A__ : Dict =kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A__ : List[str] =kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A__ : int =kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A__ : List[str] =kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A__ : str =kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A__ : Union[str, Any] =kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
return super().__call__(lowerCAmelCase_ , *lowerCAmelCase_ , num_workers=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any]=64 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : float = 5_12 / 15_00 , lowerCAmelCase_ : Optional[int] = 32 , lowerCAmelCase_ : Optional[int] = 1 , ) -> str:
'''simple docstring'''
A__ : Any =load_image(lowerCAmelCase_ )
A__ : int =self.image_processor.size["""longest_edge"""]
A__ , A__ , A__ , A__ : Dict =self.image_processor.generate_crop_boxes(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : List[str] =self.image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A__ : Tuple =self.get_inference_context()
with inference_context():
A__ : Union[str, Any] =self._ensure_tensor_on_device(lowerCAmelCase_ , device=self.device )
A__ : List[Any] =self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A__ : int =image_embeddings
A__ : Dict =grid_points.shape[1]
A__ : List[Any] =points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Dict =grid_points[:, i : i + points_per_batch, :, :]
A__ : List[str] =input_labels[:, i : i + points_per_batch]
A__ : Dict =i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowercase__ ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=0.88 , lowerCAmelCase_ : Tuple=0.95 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Dict=1 , ) -> Union[str, Any]:
'''simple docstring'''
A__ : str =model_inputs.pop("""input_boxes""" )
A__ : Union[str, Any] =model_inputs.pop("""is_last""" )
A__ : Union[str, Any] =model_inputs.pop("""original_sizes""" ).tolist()
A__ : Tuple =model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A__ : Optional[int] =self.model(**lowerCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A__ : Union[str, Any] =model_outputs["""pred_masks"""]
A__ : str =self.image_processor.post_process_masks(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , binarize=lowerCAmelCase_ )
A__ : int =model_outputs["""iou_scores"""]
A__ , A__ , A__ : List[Any] =self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=0.7 , ) -> List[str]:
'''simple docstring'''
A__ : Dict =[]
A__ : Any =[]
A__ : Optional[int] =[]
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A__ : Optional[Any] =torch.cat(lowerCAmelCase_ )
A__ : Dict =torch.cat(lowerCAmelCase_ )
A__ , A__ , A__ , A__ : int =self.image_processor.post_process_for_mask_generation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =defaultdict(lowerCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
                extra[k].append(v )
A__ : List[Any] ={}
if output_rle_mask:
A__ : List[Any] =rle_mask
if output_bboxes_mask:
A__ : int =bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 712 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
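# Hedged sketch of the 4-bit loading path these tests exercise; the field names
# on BitsAndBytesConfig are the public ones, but the checkpoint choice and the
# presence of bitsandbytes/accelerate/CUDA on the machine are assumptions.
if __name__ == "__main__":
    _bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
    )
    _quantized = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m", quantization_config=_bnb_config, device_map="auto"
    )
    print(_quantized.get_memory_footprint())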
| 687 | 0 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__snake_case : Any = True
except ImportError:
__snake_case : Union[str, Any] = False
try:
from torch.hub import _get_torch_home
__snake_case : Optional[int] = _get_torch_home()
except ImportError:
__snake_case : int = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
__snake_case : int = os.path.join(torch_cache_home, 'transformers')
__snake_case : int = 'https://cdn.huggingface.co'
__snake_case : List[str] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
__snake_case : Optional[int] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
__snake_case : int = os.path.join(PATH, 'config.yaml')
__snake_case : Dict = os.path.join(PATH, 'attributes.txt')
__snake_case : List[Any] = os.path.join(PATH, 'objects.txt')
__snake_case : List[str] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
__snake_case : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
__snake_case : Union[str, Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
__snake_case : List[str] = 'pytorch_model.bin'
__snake_case : Optional[int] = 'config.yaml'
def __lowerCamelCase ( __snake_case : int=OBJECTS, __snake_case : str=ATTRIBUTES ) -> Any:
"""simple docstring"""
A__ : Dict =[]
with open(a_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
A__ : List[Any] =[]
with open(a_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ : List[str] =OrderedDict()
with open(a_, """rb""" ) as f:
A__ : Optional[int] =pkl.load(a_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
A__ : str =ckp.pop(a_ )
if isinstance(a_, np.ndarray ):
A__ : List[str] =torch.tensor(a_ )
else:
assert isinstance(a_, torch.tensor ), type(a_ )
A__ : int =v
return r
class lowerCamelCase :
'''simple docstring'''
__snake_case = {}
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict = "root" , lowerCAmelCase_ : Optional[int]=0 ) -> int:
'''simple docstring'''
A__ : Tuple =name
A__ : Dict =level
A__ : int ={}
for k, v in dictionary.items():
if v is None:
raise ValueError()
A__ : Optional[int] =copy.deepcopy(_UpperCAmelCase )
A__ : str =copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A__ : List[Any] =Config(_UpperCAmelCase , name=_UpperCAmelCase , level=level + 1 )
A__ : int =v
setattr(self , _UpperCAmelCase , _UpperCAmelCase )
A__ : Tuple =d
def __repr__( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =val
A__ : Optional[Any] =val
A__ : List[str] =key.split(""".""" )
A__ : Union[str, Any] =len(_UpperCAmelCase ) - 1
A__ : Any =self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self , _UpperCAmelCase ) and isinstance(getattr(self , _UpperCAmelCase ) , _UpperCAmelCase ):
setattr(getattr(self , _UpperCAmelCase ) , """.""".join(levels[i:] ) , _UpperCAmelCase )
if l == last_level:
A__ : str =val
else:
A__ : Any =pointer[l]
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
return self._pointer
def lowercase__ ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with open(f"{file_name}" , """w""" ) as stream:
dump(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
with open(f"{file_name}" , """w""" ) as stream:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def lowercase__ ( lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
with open(_UpperCAmelCase ) as stream:
A__ : int =load(_UpperCAmelCase , Loader=_UpperCAmelCase )
return data
def __str__( self : List[str] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =''' '''
if self._name != "root":
A__ : Union[str, Any] =f"{t * (self._level-1)}{self._name}:\n"
else:
A__ : Optional[Any] =''''''
A__ : Optional[Any] =self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n"
A__ : Any =level
return r[:-1]
@classmethod
def lowercase__ ( cls : Optional[int] , lowerCAmelCase_ : str , **lowerCAmelCase_ : List[str] ) -> str:
'''simple docstring'''
A__ : Tuple =cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[Any] , lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any] ) -> Any:
'''simple docstring'''
A__ : Dict =kwargs.pop("""cache_dir""" , _UpperCAmelCase )
A__ : int =kwargs.pop("""force_download""" , _UpperCAmelCase )
A__ : Tuple =kwargs.pop("""resume_download""" , _UpperCAmelCase )
A__ : Dict =kwargs.pop("""proxies""" , _UpperCAmelCase )
A__ : Tuple =kwargs.pop("""local_files_only""" , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
A__ : List[str] =os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
A__ : Optional[int] =pretrained_model_name_or_path
else:
A__ : List[str] =hf_bucket_url(_UpperCAmelCase , filename=_UpperCAmelCase , use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
A__ : List[str] =cached_path(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
A__ : Tuple =Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
A__ : Tuple ='''Can\'t load config for'''
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def __lowerCamelCase ( __snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[int] =torch.load("""dump.pt""", map_location=in_tensor.device )
A__ : List[str] =in_tensor.numpy()
A__ : List[Any] =out_tensor.numpy()[0]
print(na.shape, na[0, 0, :5] )
print(na.shape, na[0, 0, :5] )
assert np.allclose(a_, a_, rtol=0.01, atol=0.1 ), (
f"{sum([1 for x in np.isclose(a_, a_, rtol=0.01, atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def __lowerCamelCase ( __snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =urlparse(a_ )
return parsed.scheme in ("http", "https")
def __lowerCamelCase ( __snake_case : str, __snake_case : str, __snake_case : str=True ) -> str:
"""simple docstring"""
A__ : Any =CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
A__ : Optional[int] ='''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[Any], __snake_case : List[str]=None, __snake_case : Optional[Any]=0, __snake_case : Optional[Any]=None, ) -> int:
"""simple docstring"""
A__ : List[str] ='''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(a_, a_ ):
ua += "; " + "; ".join("""{}/{}""".format(a_, a_ ) for k, v in user_agent.items() )
elif isinstance(a_, a_ ):
ua += "; " + user_agent
A__ : Optional[Any] ={'''user-agent''': ua}
if resume_size > 0:
A__ : Dict ='''bytes=%d-''' % (resume_size,)
A__ : Dict =requests.get(a_, stream=a_, proxies=a_, headers=a_ )
if response.status_code == 416: # Range not satisfiable
return
A__ : Tuple =response.headers.get("""Content-Length""" )
A__ : Dict =resume_size + int(a_ ) if content_length is not None else None
A__ : Optional[int] =tqdm(
unit="""B""", unit_scale=a_, total=a_, initial=a_, desc="""Downloading""", )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(a_ ) )
temp_file.write(a_ )
progress.close()
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Tuple=None, __snake_case : Optional[Any]=False, __snake_case : List[Any]=None, __snake_case : Union[str, Any]=10, __snake_case : Union[str, Any]=False, __snake_case : List[Any]=None, __snake_case : Dict=False, ) -> Any:
"""simple docstring"""
if cache_dir is None:
A__ : int =TRANSFORMERS_CACHE
if isinstance(a_, a_ ):
A__ : List[str] =str(a_ )
os.makedirs(a_, exist_ok=a_ )
A__ : List[str] =None
if not local_files_only:
try:
A__ : int =requests.head(a_, allow_redirects=a_, proxies=a_, timeout=a_ )
if response.status_code == 200:
A__ : Any =response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
A__ : Any =url_to_filename(a_, a_ )
# get cache path to put the file
A__ : Tuple =os.path.join(a_, a_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(a_ ):
return cache_path
else:
A__ : Tuple =[
file
for file in fnmatch.filter(os.listdir(a_ ), filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(a_ ) > 0:
return os.path.join(a_, matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set \'local_files_only\'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(a_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
A__ : Dict =cache_path + '''.lock'''
with FileLock(a_ ):
# If the download just completed while the lock was activated.
if os.path.exists(a_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
A__ : Optional[int] =cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(a_, """a+b""" ) as f:
yield f
A__ : Optional[int] =_resumable_file_manager
if os.path.exists(a_ ):
A__ : Optional[int] =os.stat(a_ ).st_size
else:
A__ : Any =0
else:
A__ : List[str] =partial(tempfile.NamedTemporaryFile, dir=a_, delete=a_ )
A__ : int =0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""", a_, temp_file.name, )
http_get(
a_, a_, proxies=a_, resume_size=a_, user_agent=a_, )
os.replace(temp_file.name, a_ )
A__ : Dict ={'''url''': url, '''etag''': etag}
A__ : Optional[Any] =cache_path + '''.json'''
with open(a_, """w""" ) as meta_file:
json.dump(a_, a_ )
return cache_path
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[str]=None ) -> Any:
"""simple docstring"""
A__ : Union[str, Any] =url.encode("""utf-8""" )
A__ : Tuple =shaaaa(a_ )
A__ : Optional[Any] =url_hash.hexdigest()
if etag:
A__ : Optional[Any] =etag.encode("""utf-8""" )
A__ : Tuple =shaaaa(a_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[int]=None, __snake_case : Tuple=False, __snake_case : Optional[int]=None, __snake_case : Union[str, Any]=False, __snake_case : Tuple=None, __snake_case : List[Any]=False, __snake_case : str=False, __snake_case : List[Any]=False, ) -> Any:
"""simple docstring"""
if cache_dir is None:
A__ : Dict =TRANSFORMERS_CACHE
if isinstance(a_, a_ ):
A__ : int =str(a_ )
if isinstance(a_, a_ ):
A__ : List[Any] =str(a_ )
if is_remote_url(a_ ):
# URL, so get it from the cache (downloading if necessary)
A__ : List[str] =get_from_cache(
a_, cache_dir=a_, force_download=a_, proxies=a_, resume_download=a_, user_agent=a_, local_files_only=a_, )
elif os.path.exists(a_ ):
# File, and it exists.
A__ : Any =url_or_filename
elif urlparse(a_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(a_ ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(a_ ) )
if extract_compressed_file:
if not is_zipfile(a_ ) and not tarfile.is_tarfile(a_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
A__ : List[str] =os.path.split(a_ )
A__ : Optional[Any] =output_file.replace(""".""", """-""" ) + '''-extracted'''
A__ : Optional[int] =os.path.join(a_, a_ )
if os.path.isdir(a_ ) and os.listdir(a_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
A__ : List[str] =output_path + '''.lock'''
with FileLock(a_ ):
shutil.rmtree(a_, ignore_errors=a_ )
os.makedirs(a_ )
if is_zipfile(a_ ):
with ZipFile(a_, """r""" ) as zip_file:
zip_file.extractall(a_ )
zip_file.close()
elif tarfile.is_tarfile(a_ ):
A__ : Dict =tarfile.open(a_ )
tar_file.extractall(a_ )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(a_ ) )
return output_path_extracted
return output_path
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Optional[int]="," ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(a_, a_ )
if os.path.isfile(a_ ):
with open(a_ ) as f:
A__ : Any =eval(f.read() )
else:
A__ : Tuple =requests.get(a_ )
try:
A__ : int =requests.json()
except Exception:
A__ : Optional[Any] =req.content.decode()
assert data is not None, "could not connect"
try:
A__ : Optional[int] =eval(a_ )
except Exception:
A__ : Any =data.split("""\n""" )
req.close()
return data
def __lowerCamelCase ( __snake_case : List[str] ) -> str:
"""simple docstring"""
A__ : List[Any] =requests.get(a_ )
A__ : Tuple =np.array(Image.open(BytesIO(response.content ) ) )
return img
def __lowerCamelCase ( __snake_case : Dict ) -> Dict:
"""simple docstring"""
A__ : List[Any] =url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(a_ )
with open(a_, """rb""" ) as stream:
A__ : Optional[int] =pkl.load(a_ )
A__ : Optional[int] =weights.pop("""model""" )
A__ : Dict ={}
for k, v in model.items():
A__ : str =torch.from_numpy(a_ )
if "running_var" in k:
A__ : List[Any] =torch.tensor([0] )
A__ : Optional[Any] =k.replace("""running_var""", """num_batches_tracked""" )
A__ : str =zero
return new
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
print(f"{os.path.abspath(os.path.join(a_, os.pardir ) )}/demo.ipynb" )
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : List[Any]="RGB" ) -> str:
"""simple docstring"""
assert isinstance(a_, a_ )
if os.path.isfile(a_ ):
A__ : str =cva.imread(a_ )
else:
A__ : Dict =get_image_from_url(a_ )
assert img is not None, f"could not connect to: {im}"
A__ : Tuple =cva.cvtColor(a_, cva.COLOR_BGR2RGB )
if input_format == "RGB":
A__ : Dict =img[:, :, ::-1]
return img
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : str=1 ) -> List[str]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0, len(a_ ), a_ ))
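# Hedged mirror of the cache-key scheme url_to_filename implements above:
# sha256(url) with an optional "." + sha256(etag) suffix. The sample URL and
# etag are made-up values.
if __name__ == "__main__":
    from hashlib import sha256 as _sha256

    _url_bytes = "https://cdn.huggingface.co/demo/pytorch_model.bin".encode("utf-8")
    _etag_bytes = "abc123".encode("utf-8")
    print(_sha256(_url_bytes).hexdigest() + "." + _sha256(_etag_bytes).hexdigest())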
| 713 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
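# Hedged migration sketch: the deprecated class above is a thin alias, so the
# drop-in replacement is the image processor it points to ("hustvl/yolos-small"
# is an assumed checkpoint name):
# from transformers import YolosImageProcessor
# image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")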
| 687 | 0 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : Any ) -> Dict:
"""simple docstring"""
    A__ : Any =[int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 254 for octet in octets )
if __name__ == "__main__":
__snake_case : List[Any] = input().strip()
__snake_case : List[Any] = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 714 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm, causal=self.causal, n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
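

# Hedged usage note: with the mangled names restored above, this suite runs under
# pytest, e.g. `python -m pytest test_modeling_xlm.py -k "xlm"`; the exact file
# location inside the transformers test tree is an assumption.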
| 687 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
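
# Hedged usage sketch (not part of the original file): the restored class behaves
# like any PretrainedConfig — keyword overrides sit on top of the defaults above.
#   config = BertGenerationConfig(hidden_size=512, num_hidden_layers=8)
#   assert config.model_type == "bert-generation"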
| 715 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed every relevant RNG (python, numpy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
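
# Illustrative note: calling set_seed(42) once at program start makes weight init,
# dropout and data shuffling reproducible across runs (CUDA kernels that are
# nondeterministic by design are not covered).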
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            # warmup schedule: decay ramps from 0 toward 1 as step grows
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Returns the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily save the current parameters so they can be restored later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the ExponentialMovingAverage state, validating every field."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 687 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in decoded and tokenized form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]="val" ) -> Union[str, Any]:
'''simple docstring'''
self.step_count += 1
A__ : Optional[Any] ={k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A__ : Union[str, Any] =losses["""loss"""]
A__ : Optional[int] ={
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A__ : Any =(
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A__ : Optional[Any] =torch.tensor(lowerCAmelCase_ ).type_as(lowerCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase_ )
A__ : Union[str, Any] ={f"{prefix}_avg_{k}": x for k, x in losses.items()}
A__ : str =self.step_count
self.metrics[prefix].append(lowerCAmelCase_ ) # callback writes this to self.metrics_save_path
A__ : Union[str, Any] =flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        max_len_help = (
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        )
        parser.add_argument("--max_source_length", default=1024, type=int, help=max_len_help)
        parser.add_argument("--max_target_length", default=56, type=int, help=max_len_help)
        parser.add_argument("--val_max_target_length", default=142, type=int, help=max_len_help)
        parser.add_argument("--test_max_target_length", default=142, type=int, help=max_len_help)
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
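
    # Hedged example invocation; --data_dir/--output_dir/--model_name_or_path and
    # --do_train come from lightning_base's generic args and are assumptions here:
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./ckpts \
    #       --model_name_or_path facebook/bart-base --do_train --gpus 1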
| 716 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
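

# Quick sanity check (illustrative): for uniform logits the entropy is log(K),
# e.g. entropy(torch.zeros(1, 4)) == tensor([1.3863]) == log(4).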
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pool one intermediate layer's output and classify it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
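

if __name__ == "__main__":
    # Hedged smoke test (not in the original file). BertConfig is an assumption —
    # it is not imported above — but any config carrying the fields these modules
    # read (hidden_size, num_labels, dropout probs, ...) works the same way.
    from transformers import BertConfig

    config = BertConfig(
        hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37, num_labels=3
    )
    model = DeeBertForSequenceClassification(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 8))
    loss, logits = model(input_ids, labels=torch.tensor([0, 1]))[:2]
    print(loss.item(), logits.shape)  # scalar loss, logits of shape (2, 3)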
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
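# Command-line flags for the TPU MLM training run; the dataset paths may point at
# local directories or at `gs://` buckets when running on Cloud TPU.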
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
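# Shard filenames written by prepare_tfrecord_shards.py end in "-<shard>-<num_samples>.tfrecord",
# so the total sample count can be read from the names without opening any file.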
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
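# Builds the tf.data input pipeline: parallel TFRecord reads, decoding, shuffling,
# batching, masking, and prefetching so the TPU never waits on input.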
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(batch_size)
    return dataset
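# Entry point: set up the TPU/GPU distribution strategy, build the model under
# strategy.scope(), construct the train/eval datasets, then train with Keras fit().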
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 687 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
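# If torch/transformers are missing or too old, fall back to dummy objects so that
# importing this module still succeeds and only raises a helpful error on use.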
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
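# Declare the public API lazily: submodules are imported on first attribute access
# via _LazyModule, and torch-only classes are registered conditionally below.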
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
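# replicate() copies the pipeline params onto every device and shard() splits the
# batched inputs, so the generation below runs once per TPU/GPU device under pmap.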
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 719 |
'''simple docstring'''
import os
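# Import works both when run as part of the package (relative import) and as a standalone script.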
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
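# timm/DINO checkpoints store query, key and value as one fused qkv projection;
# split it into the separate tensors the HF ViT implementation expects.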
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
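# Inverse transform: prepending the BWT column and re-sorting, repeated len(bwt_string)
# times, rebuilds the sorted rotation table; row idx_original_string is the original input.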
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string \'{s}\' results """
F"""in \'{result['bwt_string']}\'"""
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"""Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' """
F"""we get original string \'{original_string}\'"""
)
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
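# step_rules is a comma-separated string of "step:multiplier" pairs plus a final bare
# multiplier, e.g. "10:1.0,20:0.1,0.01": each multiplier applies to steps below its
# threshold, and the trailing value applies once all thresholds are passed.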
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
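# Unified getter: resolve the schedule function by name and validate that the
# arguments the chosen schedule needs (warmup steps, training steps, ...) were provided.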
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 687 | 0 |
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
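# Dijkstra's two-stack algorithm evaluates fully parenthesised infix expressions:
# operands and intermediate results go on one stack, pending operators on the other.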
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 0 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
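# Each iteration replaces every edge with four shorter edges (the Koch construction),
# so the number of segments grows by a factor of four per step.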
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case : Dict = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
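# Subclass of TrainingArguments that wires device setup through SageMaker's
# model-parallel (smp) and data-parallel (smddp) runtimes when they are available.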
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 702 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
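# replicate() copies the pipeline params onto every device; shard() splits the
# batched inputs so each device generates its own sample under pmap.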
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 687 | 0 |