[Dataset viewer header. Columns: code (string, 82 to 53.2k chars), code_codestyle (int64, 0 to 721), style_context (string, 91 to 41.9k chars), style_context_codestyle (int64, 0 to 699), label (int64, 0 to 1)]
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase : Any = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCAmelCase : Dict = []
lowerCAmelCase : List[Any] = []
lowerCAmelCase : List[str] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
lowerCAmelCase : List[Any] = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
"emoji": True,
},
}
]
lowerCAmelCase : Dict = 0
for log in Path().glob("""*.log"""):
lowerCAmelCase : Tuple = 0
with open(log, """r""") as f:
for line in f:
lowerCAmelCase : Dict = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCAmelCase : Union[str, Any] = line["nodeid"]
if line.get("""duration""", None) is not None:
lowerCAmelCase : Tuple = F"""{line['duration']:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCAmelCase : Tuple = []
log.unlink()
lowerCAmelCase : Optional[int] = ""
lowerCAmelCase : Optional[Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : Dict = {}
for test in failed_tests:
lowerCAmelCase : int = test[0].split("""::""")
lowerCAmelCase : str = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCAmelCase : Any = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase : Dict = [test[0] for test in failed_table]
lowerCAmelCase : List[str] = list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase : Union[str, Any] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase : List[Any] = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCAmelCase : str = "Too many failed tests, please see the full report in the Action results."
lowerCAmelCase : Dict = len(err) + 10
lowerCAmelCase : int = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCAmelCase : int = "No failed tests! 🤗"
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCAmelCase : int = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCAmelCase : int = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
lowerCAmelCase : List[str] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
lowerCAmelCase : List[str] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCAmelCase : Tuple = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCAmelCase : Optional[Any] = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCAmelCase : List[str] = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase : Any = row[0]
else:
lowerCAmelCase : Tuple = ""
lowerCAmelCase : Union[str, Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the array from both ends to find the index of key.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
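# --- Added usage sketch (not part of the original file) ---
# search() probes both ends of the list and recurses inward, so an element is
# found from whichever side it is closer to.
if __name__ == "__main__":
    data = [1, 2, 4, 5, 3]
    assert search(data, 4) == 2   # reached while scanning from the left
    assert search(data, 3) == 4   # matched immediately at the right end
    assert search(data, 6) == -1  # absent keys return -1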
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set, rank 1 per set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
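# --- Added usage sketch (not part of the original file) ---
# Three sets with initial sizes [1, 1, 2]; merge() unions by rank and
# max_set tracks the largest set size seen so far.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 2])
    assert ds.merge(0, 1) is True   # {0} and {1} join into a set of size 2
    assert ds.merge(0, 1) is False  # already in the same set
    ds.merge(1, 2)
    assert ds.max_set == 4          # all four items now live in one set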
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        FalconForCausalLM,
        FalconForQuestionAnswering,
        FalconForSequenceClassification,
        FalconForTokenClassification,
        FalconModel,
    )


class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_rw_cache_conversion(self):
        # Round-trips the past-key-values cache through Falcon's RW-format converters.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have a different number of KV-heads than query heads, so this
        # overrides the common test to compute the expected head count per layer.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their architectures
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
# Number of symbols in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if the pattern occurs in the text, using rolling hashes so
    that each window of the text is compared in (expected) constant time.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
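# --- Added sketch (not part of the original file) ---
# Checks the rolling-hash identity used in rabin_karp() against a naive rehash:
# dropping the leading character and appending a new one is a single O(1) update.
def _naive_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


if __name__ == "__main__":
    power = pow(alphabet_size, len("abc") - 1, modulus)  # alphabet_size**(m - 1) mod modulus
    rolled = ((_naive_hash("abc") - ord("a") * power) * alphabet_size + ord("d")) % modulus
    assert rolled == _naive_hash("bcd")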
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculates the speed of sound in a fluid: c = sqrt(bulk_modulus / density).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
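# --- Added usage sketch (not part of the original file) ---
# Water at roughly 20 °C: density ~998 kg/m^3, bulk modulus ~2.15e9 Pa,
# which gives ~1468 m/s, close to the commonly quoted ~1480 m/s.
if __name__ == "__main__":
    speed = speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
    print(f"{speed:.0f} m/s")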
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of a number.
    If digit_amount > 0 round to that decimal place, else return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs in the BARTPho format: `<s> X </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BARTPho does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_lowercase : Tuple ="▁"
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = AlbertTokenizer
def __init__( self : Optional[Any] , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : int="[CLS]" , __UpperCamelCase : List[Any]="[SEP]" , __UpperCamelCase : List[Any]="<unk>" , __UpperCamelCase : Optional[int]="[SEP]" , __UpperCamelCase : str="<pad>" , __UpperCamelCase : Any="[CLS]" , __UpperCamelCase : List[str]="[MASK]" , **__UpperCamelCase : str , ) -> int:
"""simple docstring"""
snake_case__ : str = (
AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase , normalized=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase )
else mask_token
)
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : str = do_lower_case
snake_case__ : int = remove_space
snake_case__ : Optional[Any] = keep_accents
snake_case__ : List[Any] = vocab_file
snake_case__ : Optional[Any] = False if not self.vocab_file else True
def lowerCAmelCase ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
snake_case__ : str = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class __lowercase :
def __init__( self : Dict ,A : int ,A : MutableSequence[float] ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
UpperCAmelCase__ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = degree
def __add__( self : Any ,A : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
UpperCAmelCase__ : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree ,_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase__ : Any = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree ,_SCREAMING_SNAKE_CASE )
def __sub__( self : Optional[int] ,A : Polynomial ):
'''simple docstring'''
return self + polynomial_a * Polynomial(0 ,[-1] )
def __neg__( self : Any ):
'''simple docstring'''
return Polynomial(self.degree ,[-c for c in self.coefficients] )
def __mul__( self : Union[str, Any] ,A : Polynomial ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree ,_SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] ,A : int | float ):
'''simple docstring'''
UpperCAmelCase__ : Any = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """"""
for i in range(self.degree ,-1 ,-1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_SCREAMING_SNAKE_CASE )
return polynomial
def __repr__( self : str ):
'''simple docstring'''
return self.__str__()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase__ : Any = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 ,_SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] ,A : int | float = 0 ):
'''simple docstring'''
UpperCAmelCase__ : Any = [0] * (self.degree + 2)
UpperCAmelCase__ : List[Any] = constant
for i in range(self.degree + 1 ):
UpperCAmelCase__ : List[Any] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 ,_SCREAMING_SNAKE_CASE )
def __eq__( self : Any ,A : object ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : str ,A : object ):
'''simple docstring'''
return not self.__eq__(_SCREAMING_SNAKE_CASE )
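# --- Added usage sketch (not part of the original file) ---
# Coefficients are ordered from degree 0 upwards, so [1, 2, 3] is 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    print(p)              # 3x^2 + 2x + 1
    print(p.evaluate(2))  # 3*4 + 2*2 + 1 = 17
    print(p.derivative()) # 6x + 2
    assert p.derivative().integral(constant=1) == p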
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 280 | 0 |
"""simple docstring"""
import functools
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = len(_UpperCAmelCase )
A_ : List[Any] = len(_UpperCAmelCase )
@functools.cache
def min_distance(_UpperCAmelCase , _UpperCAmelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
A_ : Tuple = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _UpperCAmelCase ) , 1 + min_distance(_UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
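    # Added usage example (assumes the function above): the classic Levenshtein
    # pair "intention" -> "execution" needs 5 edits.
    print(min_distance_up_bottom("intention", "execution"))  # prints 5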
| 361 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig ):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.num_heads
    @property
    def num_hidden_layers( self ):
        """simple docstring"""
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 13
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 5e-4
| 361 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__lowerCamelCase = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class _lowercase ( tr.AbstractTransform ):
def __init__( self , UpperCamelCase_ = " " ):
__magic_name__ = sentence_delimiter
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
return list(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
__magic_name__ = []
for sent_idx, sentence in enumerate(UpperCamelCase_ ):
chars.extend(self.process_string(UpperCamelCase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__lowerCamelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__lowerCamelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__lowerCamelCase = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__lowerCamelCase = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
__lowerCamelCase = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric ):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )
    def _compute( self , predictions , references , concatenate_texts=False ):
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 490 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified ( ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_get = y[k] + step_size * ode_func(x , y[k] )  # predictor (plain Euler step)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )  # corrector (trapezoidal average of the two slopes)
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
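# Added example (assumes the reconstructed signature above): integrate y' = y
# on [0, 1] with step 0.1. Heun's method is second order, so y[-1] is ~2.7141,
# close to the exact value e ~ 2.71828 (plain Euler would give ~2.5937).
if __name__ == "__main__":
    print(euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1])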
| 490 | 1 |
def solution ( n: int = 100 ) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 584 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_resize_token_embeddings( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , """weight""" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , """weight""" ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class TFOPTHeadTests(unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data( self ):
        """simple docstring"""
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        self.path_model = """facebook/opt-350m"""
    def test_logits( self ):
        """simple docstring"""
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model )
        prompts = [
            """Today is a beautiful day and I want to""",
            """In the city of""",
            """Paris is the capital of France and""",
            """Computers and mobile phones have taken""",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="""tf""" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase ):
    @property
    def prompts( self ):
        """simple docstring"""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm( self ):
        """simple docstring"""
        model_id = """facebook/opt-125m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of New York, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation( self ):
        """simple docstring"""
        model_id = """facebook/opt-350m"""
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            """Hello, my dog is a little bit of a dork.\nI'm a little bit""",
            """Today, I was in the middle of a conversation with a friend about the""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        """simple docstring"""
        model_id = """facebook/opt-350m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of San Francisco, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 584 | 1 |
'''simple docstring'''
def perfect ( number: int ) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__magic_name__ : Dict = int(input('''Enter number: ''').strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
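    # Added check: 28 = 1 + 2 + 4 + 7 + 14 is perfect; 27 is not (1 + 3 + 9 = 13).
    print(f'''perfect(28) = {perfect(28)}, perfect(27) = {perfect(27)}''')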
| 497 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 497 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2 )
| 411 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str ):
    '''simple docstring'''
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f".{module_name}" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        raise EnvironmentError(
            '''AutoImageProcessor is designed to be instantiated '''
            '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('''image_processor_type''' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''' )
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , '''image_processor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
    @staticmethod
    def register(config_class , image_processor_class ):
        '''simple docstring'''
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
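# Typical usage, sketched (the checkpoint name is only an illustrative example):
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=image, return_tensors="pt")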
| 411 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_2 = ['A', 'B', 'C']
generate_all_permutations(sequence_2)
| 265 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__( self , image_processor , tokenizer ) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('''attention_mask''' )
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('''input_ids''' )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 714 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__( self , image_processor , tokenizer ) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('''attention_mask''' )
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('''input_ids''' )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 597 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig ):
    model_type = """blip_2_vision_model"""
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class Blip2QFormerConfig(PretrainedConfig ):
    model_type = """blip_2_qformer"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class Blip2Config(PretrainedConfig ):
    model_type = """blip-2"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = Blip2VisionConfig(**vision_config )
        self.qformer_config = Blip2QFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config: "Blip2VisionConfig" , qformer_config: "Blip2QFormerConfig" , text_config: PretrainedConfig , **kwargs , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 616 |
"""simple docstring"""
def greatest_common_divisor ( x: int , y: int ) -> int:
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x: int , y: int ) -> int:
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n: int = 20 ) -> int:
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
| 616 | 1 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(F"{key}\n{value}\n")
| 717 |
import math
def is_prime ( number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime ( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
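# Added example: next_prime(14) steps 14 -> 15 -> 16 -> 17 and returns 17;
# next_prime(13) also returns 17, because an input that is already prime hits
# the `value == first_value_val` branch and restarts the search from value + 1.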
| 212 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum ( number: int ) -> int:
    '''simple docstring'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution ( ) -> int:
    '''simple docstring'''
    return sum(
        number
        for number in range(1000 , 100_0000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
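# Worked check (added for illustration): 4150 == 4**5 + 1**5 + 5**5 + 0**5,
# so digits_fifth_powers_sum(4150) == 4150 and it contributes to solution().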
| 254 |
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        # Number of samples per chunk, if chunking is enabled.
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        # Stride between consecutive chunks, derived from the overlap fraction.
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        # Frames per second produced by the encoder after all downsampling stages.
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
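# Minimal usage sketch (hedged; assumes this file sits in a transformers-style
# package so the relative imports above resolve):
#   config = EncodecConfig(sampling_rate=48_000, audio_channels=2)
#   config.frame_rate      # ceil(48_000 / prod([8, 5, 4, 2])) == 150
#   config.num_quantizers  # derived from the largest target bandwidth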
| 254 | 1 |
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: make `func` return its wall-clock runtime in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Create `num_examples` rows of random data matching the `features` schema."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 535 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
) | 535 | 1 |
"""Convert an original CompVis latent-diffusion checkpoint to a diffusers LDMPipeline."""

import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
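# Example invocation (illustrative; file names are placeholders):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm_pipeline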
| 131 |
"""Transformer-XL model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
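# Hedged usage sketch: the defaults above match the wt103 checkpoint;
#   config = TransfoXLConfig(mem_len=800, clamp_len=400)
#   config.max_position_embeddings  # -> -1 (no fixed sequence length limit)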
| 131 | 1 |
import unittest
from typing import Any  # used by the expected-encoding literal preserved below

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # EXPECTED_ENCODING is the module-level payload preserved verbatim below.
        self.tokenizer_integration_test_util(
            expected_encoding=EXPECTED_ENCODING,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )


# fmt: off
a_ : Any = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
EXPECTED_ENCODING = a_
# fmt: on
| 709 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class snake_case_ ( a_ ,unittest.TestCase ):
__lowerCAmelCase = SpeechTaTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = True
def snake_case_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
a_ : Any = SpeechTaTokenizer(a_ )
a_ : Optional[int] = AddedToken("<mask>" , lstrip=a_ , rstrip=a_ )
a_ : Any = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self , a_ ):
a_ : Tuple = "this is a test"
a_ : Any = "this is a test"
return input_text, output_text
def snake_case_ ( self , a_ , a_=False , a_=2_0 , a_=5 ):
a_ , a_ : Optional[Any] = self.get_input_output_texts(a_ )
a_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
a_ : Dict = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
return text, ids
def snake_case_ ( self ):
a_ : List[Any] = "<pad>"
a_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def snake_case_ ( self ):
a_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(a_ ) , 8_1 )
def snake_case_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def snake_case_ ( self ):
a_ : Any = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
a_ : Dict = tokenizer.vocab_size
a_ : List[str] = len(a_ )
self.assertNotEqual(a_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a_ : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
a_ : int = tokenizer.add_tokens(a_ )
a_ : List[Any] = tokenizer.vocab_size
a_ : Tuple = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size + len(a_ ) )
a_ : str = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a_ : Tuple = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
a_ : Dict = tokenizer.add_special_tokens(a_ )
a_ : Optional[Any] = tokenizer.vocab_size
a_ : Any = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size_a + len(a_ ) )
a_ : Any = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Any = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(a_ , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
a_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
a_ : Tuple = tokenizer.convert_tokens_to_ids(a_ )
# fmt: off
self.assertListEqual(a_ , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
a_ : Tuple = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def snake_case_ ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
a_ : List[Any] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
a_ : Tuple = {
"input_ids": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
EXPECTED_ENCODING = a_
# fmt: on
| 370 | 0 |
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    """Samples reward-guided trajectories for an RL environment with a diffusion model."""

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # Pin the conditioned timesteps (e.g. the initial state) in each trajectory.
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
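# Hedged usage sketch (assumes a d4rl-style `env` plus pretrained value/unet models):
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   action = pipeline(obs, planning_horizon=32)  # first action of the highest-value plan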
| 87 |
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the missing one passed as 0),
    return the missing quantity as a (name, value) namedtuple."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod() | 149 | 0 |
"""NEZHA model configuration."""

from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
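# Hedged usage sketch: NezhaConfig() matches the sijunhe/nezha-cn-base defaults;
#   config = NezhaConfig(num_hidden_layers=6, hidden_dropout_prob=0.2)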
| 508 |
"""Lazy import structure for the speech-encoder-decoder models."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 508 | 1 |
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 25 |
def solution(n: int = 4_000_000) -> int:
    """Project Euler 2: sum of the even Fibonacci numbers not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
| 548 | 0 |
import functools


def mincost_tickets(days, costs):
    """Minimum cost to travel on every day in `days`, given 1/7/30-day pass costs."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
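# Worked example (added for illustration): a 7-day pass covering days 1-7 plus
# two 1-day tickets for days 8 and 20 gives
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11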
| 102 |
def longest_common_subsequence(x: str, y: str):
    """Return (length, subsequence) of the longest common subsequence of x and y."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the table to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
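# Worked example (added for illustration): longest_common_subsequence("programming", "gaming")
# returns (6, "gaming"), since g-a-m-i-n-g appears in order inside "programming".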
if __name__ == "__main__":
lowerCamelCase = '''AGGTAB'''
lowerCamelCase = '''GXTXAYB'''
lowerCamelCase = 4
lowerCamelCase = '''GTAB'''
lowerCamelCase , lowerCamelCase = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 102 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """simple docstring"""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """simple docstring"""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """simple docstring"""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """simple docstring"""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
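

# Hedged usage sketch (the toy subclass and values below are illustrative,
# not from the original file):
#
#     class ToyFeatureExtractor(SequenceFeatureExtractor):
#         model_input_names = ["input_values"]
#
#     extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest", return_tensors="np")
#     batch["input_values"].shape  # (2, 3); "attention_mask" marks the real frames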
| 103 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__lowerCamelCase : Dict = """bart"""
__lowerCamelCase : Union[str, Any] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
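

# Hedged sketch of the dense-retrieval step above (shapes are illustrative):
# an IndexFlatIP scores stored embeddings by inner product, so rows must use
# the same dimensionality (128 here) at build time and at query time.
#
#     index = faiss.IndexFlatIP(128)
#     index.add(np.random.rand(1000, 128).astype("float32"))
#     scores, ids = index.search(np.random.rand(1, 128).astype("float32"), 10)
#     # ids[0] holds the ten best-matching row numbers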
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[Any]="wiki40b" , snake_case_ : Optional[int]="dense" , snake_case_ : List[str]=10 ):
if source == "none":
snake_case__, snake_case__ : Tuple = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case__, snake_case__ : Tuple = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
snake_case__, snake_case__ : Dict = query_es_index(
snake_case_ , snake_case_ , index_name="english_wiki40b_snippets_100w" , n_results=snake_case_ , )
snake_case__ : Optional[Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
snake_case__ : int = "question: {} context: {}".format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__lowerCamelCase : Dict = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__lowerCamelCase : Dict = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowerCamelCase : List[Any] = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__lowerCamelCase : Dict = st.sidebar.checkbox("""Demo options""")
if demo_options:
__lowerCamelCase : Tuple = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
__lowerCamelCase : Optional[Any] = action_list.index(action_st)
__lowerCamelCase : int = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
__lowerCamelCase : List[Any] = show_type == """Show full text of passages"""
else:
__lowerCamelCase : Any = 3
__lowerCamelCase : str = True
__lowerCamelCase : Optional[Any] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
__lowerCamelCase : Any = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__lowerCamelCase : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
__lowerCamelCase : int = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
__lowerCamelCase : Optional[int] = """wiki40b"""
__lowerCamelCase : Optional[Any] = """dense"""
__lowerCamelCase : int = """beam"""
__lowerCamelCase : Optional[Any] = 2
__lowerCamelCase : Any = 64
__lowerCamelCase : List[str] = 256
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : int = None
__lowerCamelCase : Any = st.sidebar.checkbox("""Generation options""")
if generate_options:
__lowerCamelCase : Optional[Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__lowerCamelCase : Optional[Any] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
__lowerCamelCase : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowerCamelCase : Optional[Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowerCamelCase : List[str] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowerCamelCase : str = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowerCamelCase : Any = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowerCamelCase : Optional[Any] = st.text_input("""Enter your question here:""", """""")
else:
__lowerCamelCase : List[str] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowerCamelCase , __lowerCamelCase : Tuple = make_support(question, source=wiki_source, method="""dense""", n_results=10)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
__lowerCamelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowerCamelCase : List[str] = support_list[:10]
__lowerCamelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowerCamelCase , __lowerCamelCase : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
__lowerCamelCase : List[str] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
__lowerCamelCase : str = res[1].strip()
if sec_titles == "":
__lowerCamelCase : Union[str, Any] = """[{}]({})""".format(res[0], wiki_url)
else:
__lowerCamelCase : List[str] = sec_titles.split(""" & """)
__lowerCamelCase : Dict = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
__lowerCamelCase : Optional[Any] = find_nearest_training(question)
__lowerCamelCase : Optional[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
__lowerCamelCase : Union[str, Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__lowerCamelCase : List[str] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 297 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
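

def can_construct(target: str, word_bank: list[str] | None = None) -> bool:
    # A hedged companion sketch (not in the original): the decision version of
    # the same tabulation -- True when at least one segmentation exists.
    word_bank = word_bank or []
    table = [False] * (len(target) + 1)
    table[0] = True  # the empty string is always constructible
    for i in range(len(target)):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] = True
    return table[len(target)]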
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
) | 128 |
"""simple docstring"""
import os
def _lowerCamelCase ( lowerCamelCase__ : str = "matrix.txt" ):
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
lowercase__ : Optional[Any] = in_file.read()
lowercase__ : Tuple = [[int(lowerCamelCase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
lowercase__ : List[str] = [[0 for cell in row] for row in grid]
lowercase__ : Dict = len(grid[0] )
lowercase__ : List[Any] = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
lowercase__ : Any = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
lowercase__ : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
lowercase__ : List[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
lowercase__ : Dict = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
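

# Hedged sanity sketch (illustrative grid, not the project data file): for the
# square grid [[1, 2], [3, 4]] the only-right/down minimal path is 1 -> 2 -> 4 = 7.
def _min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid[0])
    dp = [row[:] for row in grid]
    for i in range(1, n):
        dp[0][i] += dp[0][i - 1]
        dp[i][0] += dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]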
if __name__ == "__main__":
print(F"{solution() = }") | 128 | 1 |
def solution(limit: int = 100_0000) -> int:
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
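

# Hedged sanity check (matches Project Euler 72's worked example): for
# limit = 8 the sieve yields sum(phi[2..8]) = 21 reduced proper fractions.
#
#     assert solution(8) == 21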
if __name__ == "__main__":
print(solution())
| 154 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
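

# Hedged usage sketch (assumes the public `datasets` package API):
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2])
#     # with no probabilities, rows alternate: a = [0, 10, 1, 11, 2, 12]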
| 154 | 1 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
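

# Hedged examples of the string output:
#
#     binary_or(25, 32)  # '0b111001'
#     binary_or(5, 3)    # '0b111'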
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 463 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        """simple docstring"""
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
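

# Hedged sanity sketch for `cosine_distance` (illustrative only): with rows
# L2-normalized inside the helper, the matrix product is pairwise cosine
# similarity, so comparing embeddings with themselves gives 1.0 on the diagonal.
#
#     emb = torch.randn(2, 8)
#     sims = cosine_distance(emb, emb)  # shape (2, 2)
#     torch.allclose(sims.diag(), torch.ones(2), atol=1e-5)  # True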
| 21 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 21 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """simple docstring"""
    # assumes nums is sorted in ascending order
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
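

def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    # A hedged companion sketch (not in the original): O(n) two-sum with a
    # value -> index map; unlike the two-pointer scan it does not need sorted input.
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []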
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 454 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 454 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    """simple docstring"""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    """simple docstring"""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 496 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion=1 / 1_2345):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 533 | 0 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    '''simple docstring'''
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 1_2

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 1_2

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
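

# Hedged usage sketch: a 3-year loan of 25_000 at 12% annual interest implies a
# monthly rate of 0.01 over 36 payments, giving an EMI of roughly 830.36.
#
#     equated_monthly_installments(25_000, 0.12, 3)  # ~830.36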
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
def lowercase__ ( self : int , _lowercase : List[str] ):
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : int , _lowercase : int ):
return self.decoder.get(_lowercase )
def lowercase__ ( self : List[str] , _lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : int = ''''''.join(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase__ ( self : List[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : int = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '''\n''' )
SCREAMING_SNAKE_CASE__ : str = 0
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE__ : str = token_index
writer.write(''' '''.join(_lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def lowercase__ ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Dict , _lowercase : Dict , _lowercase : List[str]=False , **_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : str = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ : Any = ''' ''' + text
return (text, kwargs)
def lowercase__ ( self : int , _lowercase : Union[Dict[str, EncodedInput], BatchEncoding] , _lowercase : Optional[int] = None , _lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowercase : Optional[int] = None , _lowercase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE__ : Any = super()._pad(
encoded_inputs=_lowercase , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE__ : str = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE__ : List[str] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowercase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE__ : Dict = len(_lowercase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE__ : Any = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE__ : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 250 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu( x ):
    """simple docstring"""
    # Gaussian Error Linear Unit, exact formulation based on erf
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new( x ):
    """simple docstring"""
    # tanh approximation of GELU, as used in the original GPT-2 codebase
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeffa = tf.cast(0.044715 , x.dtype )
    coeffb = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa( x ):
    """simple docstring"""
    # "gelu_10": GELU with outputs clipped to the range [-10, 10]
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu( x , axis=-1 ):
    """simple docstring"""
    # Gated Linear Unit: split the tensor in two along `axis` and gate one half with the sigmoid of the other
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def approximate_gelu_wrap( x ):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def get_tf_activation( activation_string ):
    """simple docstring"""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
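# --- Added illustration (a minimal sketch, not part of the original module):
# look up an activation by its string name and apply it to a small tensor.
if __name__ == "__main__":
    act = get_tf_activation("""gelu_new""")
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())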
| 62 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""transformers""", """torch""", """note_seq"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
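# --- Added illustration (a minimal sketch, not part of the original file):
# the placeholder above raises as soon as it is used, unless `transformers`,
# `torch` and `note_seq` are all installed.
if __name__ == "__main__":
    try:
        MidiProcessor()
    except Exception as exc:  # requires_backends raises when a backend is missing
        print(exc)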
| 347 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ):
        return len(self.sp_model )
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text(self , inputs ):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id(self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token(self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string(self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
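# --- Added illustration (not part of the original file): ALBERT wraps a
# single sequence as `[CLS] A [SEP]` and a pair as `[CLS] A [SEP] B [SEP]`,
# which is what `build_inputs_with_special_tokens` above produces.
# A hypothetical sketch, assuming a local `spiece.model` file exists:
# tok = AlbertTokenizer("spiece.model")
# tok.build_inputs_with_special_tokens([5, 6], [7, 8])
# -> [tok.cls_token_id, 5, 6, tok.sep_token_id, 7, 8, tok.sep_token_id]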
| 357 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
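# --- Added illustration (not part of the original file): this is how an
# invocation such as `RUN_SLOW=yes python -m pytest ...` is turned into the
# boolean gate consumed by the `slow` decorator defined below.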
def skip( test_case ):
    '''simple docstring'''
    return unittest.skip('''Test was skipped''' )(test_case )
def slow( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case )
def require_cpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case )
def require_cuda( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case )
def require_xpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case )
def require_mps( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case )
def require_huggingface_suite( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case )
def require_bnb( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case )
def require_tpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case )
def require_single_gpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case )
def require_single_xpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case )
def require_multi_gpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case )
def require_multi_xpu( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case )
def require_safetensors( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case )
def require_deepspeed( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case )
def require_fsdp( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case )
def require_torch_min_version( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , F"""test requires torch version >= {version}""" )(test_case )
def require_tensorboard( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case )
def require_wandb( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case )
def require_comet_ml( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers( test_case ):
    '''simple docstring'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case )
class TempDirTestCase( unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def setUpClass(cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase( unittest.TestCase ):
    def tearDown(self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase( unittest.TestCase ):
    def add_mocks(self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors( tensor ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
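# --- Added illustration (a minimal sketch, not part of the original file):
# run a trivial command through the async helper above and inspect the result.
if __name__ == "__main__":
    demo = execute_subprocess_async([sys.executable , '''-c''' , '''print("hello")'''] , echo=False )
    print(demo.returncode , demo.stdout )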
class SubprocessCallException( Exception ):
    pass
def run_command( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 357 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
    return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model : str , cache : str , force : bool , trust_remote_code : bool ):
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
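# --- Added illustration (not part of the original file): once registered, the
# command above is invoked from the CLI as, for example,
#   transformers-cli download bert-base-uncased --cache-dir ./models
# which downloads both the model weights and the matching tokenizer.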
| 286 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__( self , conv1_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w=0.2 , rate_t=0.2 ):
        '''
        conv1_get: [size, number, step] of the first convolution kernel;
        size_p1: pooling size; bp_num1/2/3: unit numbers of the flatten,
        hidden and output layers.
        '''
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0] , self.conv1[0] ) + 0.5 )
            for i in range(self.conv1[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3 , self.num_bp2 ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2 , self.num_bp1 ) + 0.5 )
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1] ) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2 ) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3 ) + 1
    def save_model(self , save_path ):
        # save model dict with pickle
        model_dic = {
            'num_bp1': self.num_bp1,
            'num_bp2': self.num_bp2,
            'num_bp3': self.num_bp3,
            'conv1': self.conv1,
            'step_conv1': self.step_conv1,
            'size_pooling1': self.size_pooling1,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conv1,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conv1,
            'thre_bp2': self.thre_bp2,
            'thre_bp3': self.thre_bp3,
        }
        with open(save_path , 'wb' ) as f:
            pickle.dump(model_dic , f )
        print(f'''Model saved: {save_path}''' )
    @classmethod
    def read_model(cls , model_path ):
        # read saved model
        with open(model_path , 'rb' ) as f:
            model_dic = pickle.load(f ) # noqa: S301
        conv_get = model_dic.get('conv1' )
        conv_get.append(model_dic.get('step_conv1' ) )
        size_p1 = model_dic.get('size_pooling1' )
        bp_num1 = model_dic.get('num_bp1' )
        bp_num2 = model_dic.get('num_bp2' )
        bp_num3 = model_dic.get('num_bp3' )
        rate_w = model_dic.get('rate_weight' )
        rate_t = model_dic.get('rate_thre' )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get('w_conv1' )
        conv_ins.wkj = model_dic.get('wkj' )
        conv_ins.vji = model_dic.get('vji' )
        conv_ins.thre_conv1 = model_dic.get('thre_conv1' )
        conv_ins.thre_bp2 = model_dic.get('thre_bp2' )
        conv_ins.thre_bp3 = model_dic.get('thre_bp3' )
        return conv_ins
    def sig(self , x ):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x ))
    def do_round(self , x ):
        return round(x , 3 )
    def convolute(self , data , convs , w_convs , thre_convs , conv_step ):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to One dimenssion
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self.Expand_Mat(each_focus ) )
        focus_list = np.asarray(focus1_list )
        return focus_list, data_featuremap
    def pooling(self , featuremaps , size_pooling , pooling_type="average_pool" ):
        # pooling process
        size_map = len(featuremaps[0] )
        size_pooled = int(size_map / size_pooling )
        featuremap_pooled = []
        for i_map in range(len(featuremaps ) ):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling ):
                for j_focus in range(0 , size_map , size_pooling ):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus ) )
            map_pooled = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
            featuremap_pooled.append(map_pooled )
        return featuremap_pooled
    def _expand(self , data ):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def Expand_Mat(self , data_mat ):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool(self , out_map , pd_pool , num_map , size_map , size_pooling ):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv1 = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1 , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_conv2 )
        return pd_all
    def train(self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool ):
        # model traning
        print('----------------------Start Training-------------------------' )
        print((' - - Shape: Train_Data ', np.shape(datas_train )) )
        print((' - - Shape: Teach_Data ', np.shape(datas_teach )) )
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f'''-------------Learning Time {rp}--------------''' )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focus1 , data_conved1 = self.convolute(
                    data_train , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
                data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
                shape_featuremap1 = np.shape(data_conved1 )
                data_bp_input = self._expand(data_pooled1 )
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1 , self.vji.T ) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j )
                bp_net_k = np.dot(bp_out2 , self.wkj.T ) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3) , np.multiply(bp_out3 , (1 - bp_out3) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj ) , np.multiply(bp_out2 , (1 - bp_out2) ) )
                pd_i_all = np.dot(pd_j_all , self.vji )
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1 , pd_conv1_pooled , shape_featuremap1[0] , shape_featuremap1[1] , self.size_pooling1 , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1] ):
                    pd_conv_list = self.Expand_Mat(pd_conv1_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focus1 )
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]) )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3 ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , '+-' )
            plt.plot(yplot , 'r--' )
            plt.xlabel('Learning Times' )
            plt.ylabel('All_mse' )
            plt.grid(True , alpha=0.5 )
            plt.show()
        print('------------------Training Complished---------------------' )
        print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def predict(self , datas_test ):
        # model predict
        produce_out = []
        print('-------------------Start Testing-------------------------' )
        print((' - - Shape: Test_Data ', np.shape(datas_test )) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focus1 , data_conved1 = self.convolute(
                data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
            data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
            data_bp_input = self._expand(data_pooled1 )
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j )
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k )
            produce_out.extend(bp_out3.getA().tolist() )
        res = [list(map(self.do_round , each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution(self , data ):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data )
        data_focus1 , data_conved1 = self.convolute(
            data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
        data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
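# --- Added illustration (not part of the original file; the sizes below are
# hypothetical examples): the class above would be constructed as, e.g.,
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=4, bp_num1=32, bp_num2=20, bp_num3=1)
# i.e. two 3x3 convolution kernels with stride 1, 4x4 average pooling, and a
# 32-20-1 fully connected back-propagation head trained via `train`.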
| 286 | 1 |
from numpy import exp, pi, sqrt
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_UpperCamelCase = False
try:
_UpperCamelCase = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str = None , __lowercase :list = [] ):
__lowerCamelCase : Any =0
__lowerCamelCase : List[str] =choices
__lowerCamelCase : int =prompt
if sys.platform == "win32":
__lowerCamelCase : Dict ='''*'''
else:
__lowerCamelCase : Union[str, Any] ='''➔ '''
def __lowercase ( self :Tuple , __lowercase :Any , __lowercase :str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __lowercase )
else:
forceWrite(self.choices[index] , __lowercase )
def __lowercase ( self :Tuple , __lowercase :int ):
if index == self.position:
forceWrite(f' {self.arrow_char} ' )
self.write_choice(__lowercase )
else:
forceWrite(f' {self.choices[index]}' )
reset_cursor()
def __lowercase ( self :Tuple , __lowercase :Direction , __lowercase :int = 1 ):
__lowerCamelCase : List[str] =self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__lowercase )
move_cursor(__lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def __lowercase ( self :Union[str, Any] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def __lowercase ( self :Union[str, Any] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def __lowercase ( self :Any ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def __lowercase ( self :Any ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__lowercase )] for number in range(10 )] )
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple =int(chr(self.current_selection ) )
__lowerCamelCase : Dict =index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __lowercase )
else:
return
else:
return
def __lowercase ( self :Optional[int] , __lowercase :int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
__lowerCamelCase : Union[str, Any] =default_choice
for i in range(len(self.choices ) ):
self.print_choice(__lowercase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__lowerCamelCase : Union[str, Any] =int(builtins.input() )
except ValueError:
__lowerCamelCase : Optional[Any] =default_choice
else:
__lowerCamelCase : Dict =self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(__lowercase , '''\n''' )
return choice
| 363 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n )
    return n == n[::-1]
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
            total += i
    return total
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
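# --- Added illustration (not part of the original file): 585 is one of the
# numbers counted above, since both 585 and its binary form 1001001001 read
# the same forwards and backwards:
# >>> is_palindrome(585) and is_palindrome(bin(585).split("b")[1])
# True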
| 96 | import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1E-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
| 321 | 0 |
def print_pascal_triangle( num_rows ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=''' ''' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=''' ''' )
            else:
                print(triangle[row_idx][col_idx] , end='''''' )
        print()
def generate_pascal_triangle( num_rows ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row( triangle , current_row_idx ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element( triangle , current_row , current_row_idx , current_col_idx , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    result: list[list[int]] = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function( func , value ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='''import __main__''' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""" )
    for value in range(1_5 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
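# --- Added illustration (not part of the original file): both generators
# above produce the same rows, e.g.
# >>> generate_pascal_triangle(5)
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
# >>> generate_pascal_triangle_optimized(5)
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]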
| 707 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1_0_2_4,
    'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang( self ):
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
| 236 | 0 |
'''simple docstring'''
def __get_demo_graph( index ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph: dict[int, list[int]] ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph ) # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs( at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
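# --- Added illustration (not part of the original file): on the first demo
# graph above, the bridges are (2, 3), (3, 4) and (2, 5); removing any one of
# them disconnects the graph, while edges that sit on a cycle never qualify.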
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    first_sum = 0.0_0
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors: list[float] ) -> float:
    sum_r = 0.0_0
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
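# --- Added illustration (not part of the original file): two 4 ohm resistors
# combine to 2 ohm in parallel and 8 ohm in series:
# >>> resistor_parallel([4.0, 4.0])
# 2.0
# >>> resistor_series([4.0, 4.0])
# 8.0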
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase_ :
'''simple docstring'''
a__ : List[Any] = LEDConfig
a__ : int = {}
a__ : Any = """gelu"""
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=False , __lowercase=99 , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=20 , __lowercase=2 , __lowercase=1 , __lowercase=0 , __lowercase=4 , ) -> str:
__UpperCamelCase :Optional[int] = parent
__UpperCamelCase :str = batch_size
__UpperCamelCase :Union[str, Any] = seq_length
__UpperCamelCase :List[str] = is_training
__UpperCamelCase :Union[str, Any] = use_labels
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :Dict = num_hidden_layers
__UpperCamelCase :Union[str, Any] = num_attention_heads
__UpperCamelCase :Union[str, Any] = intermediate_size
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :str = attention_probs_dropout_prob
__UpperCamelCase :int = max_position_embeddings
__UpperCamelCase :Union[str, Any] = eos_token_id
__UpperCamelCase :Dict = pad_token_id
__UpperCamelCase :List[Any] = bos_token_id
__UpperCamelCase :List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__UpperCamelCase :Tuple = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__UpperCamelCase :Union[str, Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]] , axis=-1 , )
        inputs_dict['''global_attention_mask'''] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3)
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self , config_class=LEDConfig)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['''global_attention_mask'''] = tf.zeros_like(inputs_dict['''attention_mask'''])
        num_global_attn_indices = 2
        inputs_dict['''global_attention_mask'''] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''')
    def test_saved_model_creation( self):
        pass
    def test_generate_with_headmasking( self):
        pass
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''').led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3)
    def test_inference_with_lm_head( self):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''')
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3)
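# Minimal standalone sketch (illustrative, not from the original test file) of
# how the tester above builds a global attention mask: zeros everywhere except
# the last token of each sequence, which attends globally.
import tensorflow as tf

ids = tf.constant([[5, 6, 7, 8], [9, 10, 11, 12]])
global_mask = tf.concat(
    [tf.zeros_like(ids)[:, :-1], tf.ones_like(ids)[:, -1:]], axis=-1
)
# global_mask -> [[0, 0, 0, 1], [0, 0, 0, 1]]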
| 702 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """unispeech"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio( self):
        return functools.reduce(operator.mul , self.conv_stride , 1)
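# Quick check (illustrative) of the property above: with the default strides the
# feature encoder downsamples the raw waveform by the product of the strides,
# i.e. 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per output frame.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320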
| 452 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL( ModelMixin , ConfigMixin ):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (6_4,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 3_2 , sample_size = 3_2 , scaling_factor = 0.18_215 , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False ) -> List[str]:
if isinstance(snake_case_ , (Encoder, Decoder) ):
_a = value
def __lowerCAmelCase ( self , snake_case_ = True ) -> List[Any]:
_a = use_tiling
def __lowerCAmelCase ( self ) -> str:
self.enable_tiling(snake_case_ )
def __lowerCAmelCase ( self ) -> Dict:
_a = True
def __lowerCAmelCase ( self ) -> Tuple:
_a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
_a = {}
def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
_a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ )
return processors
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
_a = len(self.attn_processors.keys() )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
if not isinstance(snake_case_ , snake_case_ ):
module.set_processor(snake_case_ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case_ , return_dict=snake_case_ )
if self.use_slicing and x.shape[0] > 1:
_a = [self.encoder(snake_case_ ) for x_slice in x.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case_ , return_dict=snake_case_ )
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_a = [self._decode(snake_case_ ).sample for z_slice in z.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self._decode(snake_case_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case_ )
    def blend_v( self , a , b , blend_extent ):
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ):
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
_a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_latent_min_size * self.tile_overlap_factor )
_a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_a = []
for i in range(0 , x.shape[2] , snake_case_ ):
_a = []
for j in range(0 , x.shape[3] , snake_case_ ):
_a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_sample_min_size * self.tile_overlap_factor )
_a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_a = []
for i in range(0 , z.shape[2] , snake_case_ ):
_a = []
for j in range(0 , z.shape[3] , snake_case_ ):
_a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = False , snake_case_ = True , snake_case_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = sample
_a = self.encode(snake_case_ ).latent_dist
if sample_posterior:
_a = posterior.sample(generator=snake_case_ )
else:
_a = posterior.mode()
_a = self.decode(snake_case_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
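# Small numeric sketch (illustrative, assuming the blend logic restored above):
# blend_h/blend_v apply a linear cross-fade over the overlap region, so the
# weight of tile `a` ramps from 1 down to 0 while the weight of tile `b` ramps
# from 0 up to 1, hiding the seam between neighbouring tiles.
blend_extent = 4
for x in range(blend_extent):
    w_a = 1 - x / blend_extent  # 1.00, 0.75, 0.50, 0.25
    w_b = x / blend_extent      # 0.00, 0.25, 0.50, 0.75
    assert abs(w_a + w_b - 1.0) < 1e-9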
| 131 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase = feature_extractor(A , padding=A , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCamelCase = np.asarray(A )
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , padding=A , max_length=A , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , max_length=A , padding=A , return_tensors="""np""" , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""max_length""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=16 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_00 , 32 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 5_84, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
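# Illustrative check (not from the test file): the `_check_zero_mean_unit_variance`
# helper above asserts per-feature normalization. A matrix standardized
# column-wise passes the same tolerances.
import numpy as np

feats = np.random.randn(200, 24)
feats = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-12)
assert np.all(np.abs(feats.mean(axis=0)) < 1e-3)
assert np.all(np.abs(feats.var(axis=0) - 1) < 1e-3)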
| 457 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Union[str, Any] = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """visual_bert"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , visual_embedding_dim=5_1_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 95 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__A : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline( Pipeline ):
"""simple docstring"""
def __init__( self : Union[str, Any] , **__UpperCamelCase : Optional[Any] )->List[Any]:
super().__init__(**__UpperCamelCase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : Optional[int] , __UpperCamelCase : Union[np.ndarray, bytes, str] , **__UpperCamelCase : Tuple )->List[str]:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , '''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
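# Standalone sketch (illustrative, with made-up logits and labels) of the
# postprocessing above: softmax the audio-text logits, then pair each
# probability with its candidate label, sorted from most to least likely.
import torch

logits = torch.tensor([2.0, 0.5, -1.0])
labels = ["dog barking", "rain", "car horn"]
probs = logits.softmax(dim=0).tolist()
result = [
    {"score": s, "label": l}
    for s, l in sorted(zip(probs, labels), key=lambda x: -x[0])
]
# result[0]["label"] == "dog barking"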
| 95 | 1 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig( PretrainedConfig ):
    model_type = """autoformer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = [1, 2, 3, 4, 5, 6, 7] , scaling = True , num_time_features = 0 , num_dynamic_real_features = 0 , num_static_categorical_features = 0 , num_static_real_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , activation_function = "gelu" , dropout = 0.1 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache = True , is_encoder_decoder=True , label_length = 10 , moving_average = 25 , autocorrelation_factor = 3 , **kwargs , ):
        """simple docstring"""
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
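# Worked example (illustrative) of the embedding-dimension heuristic used in
# the constructor above: each categorical feature of cardinality `cat` gets an
# embedding of size min(50, (cat + 1) // 2), capped at 50.
cardinality = [3, 10, 200]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
assert embedding_dimension == [2, 5, 50]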
| 69 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a_ , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = FalconModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Any , a_ : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = True
__snake_case = FalconModel(a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
__snake_case = model(a_ , attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] , a_ : int , a_ : int , a_ : List[Any] , a_ : str , a_ : List[str] , a_ : str , a_ : str , a_ : Union[str, Any] , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : str , a_ : Tuple , a_ : str , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , a_ : Dict , ):
"""simple docstring"""
__snake_case = True
__snake_case = True
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0]
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = FalconModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , *__snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__snake_case = alibi
self.model_tester.create_and_check_model(a_ , *a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "single_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = input_dict["input_ids"]
__snake_case = FalconForCausalLM(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , use_cache=a_ )
__snake_case = input_ids.shape[0]
__snake_case = model._convert_to_rw_cache(result.past_key_values )
__snake_case = model._convert_cache_to_standard_format(a_ , a_ )
for layer in range(len(a_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "multi_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Dict ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a_ , "use_cache" ):
return
__snake_case = model_class(a_ ).to(a_ )
if "use_cache" not in inputs:
__snake_case = True
__snake_case = model(**a_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__snake_case = (
getattr(a_ , "decoder_layers" , a_ )
or getattr(a_ , "num_decoder_layers" , a_ )
or config.num_hidden_layers
)
__snake_case = getattr(a_ , "num_kv_heads" , config.num_attention_heads )
__snake_case = getattr(a_ , "d_model" , config.hidden_size )
__snake_case = embed_dim // num_attention_heads
__snake_case = outputs["past_key_values"]
self.assertEqual(len(a_ ) , a_ )
__snake_case , __snake_case = inputs["input_ids"].shape
for i in range(a_ ):
if config.new_decoder_architecture:
__snake_case = config.num_attention_heads
elif config.multi_query:
__snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest( unittest.TestCase ):
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
__snake_case = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
__snake_case = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=19 )
__snake_case = tokenizer.batch_decode(a_ )[0]
self.assertEqual(a_ , a_ )
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , num_beams=2 , max_new_tokens=4 )
@slow
def A ( self : Any ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(device=a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# Test results are the same with and without cache
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 69 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all args to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all args to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
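# Illustrative usage (sketch; the checkpoint name is an assumption taken from the Hub,
# not from this file):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")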
| 197 | """simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
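# Quick sanity check (illustrative): prime_sieve(10) == [2, 3, 5, 7]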
| 197 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the SDE-VE score-based model."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
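# Illustrative usage (sketch; the checkpoint name is an assumption, any SDE-VE
# checkpoint on the Hub works):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]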
| 47 |
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__( self , num_channels: int = 3 , image_size: int = 600 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 2560 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , drop_connect_rate: float = 0.2 , **kwargs ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 149 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 517 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans a modality section of the model doc TOC: deduplicates entries and sorts them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
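# Typically invoked indirectly via `make style`, per the error message above;
# direct invocation (assumed script path): python utils/check_doc_toc.py --fix_and_overwrite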
| 517 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=__lowercase , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=__lowercase , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=__lowercase , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=__lowercase , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=__lowercase , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=__lowercase , type=__lowercase , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=__lowercase , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use the same scale factor for the Q, K and V projections by taking the max of their amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval by adjusting the amax of the following input quantizer."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the max value of the tensor."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights at the current quantization granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
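# Illustrative wiring (sketch; `build_model` is a hypothetical model constructor):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args()
#   set_default_quantizers(args)   # must run before the model is created
#   model = build_model(args)
#   configure_model(model, args)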
| 129 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
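# With this pattern, `from transformers.models.reformer import ReformerModel` does not
# import the torch-dependent module until the attribute is first accessed; `_LazyModule`
# resolves names on demand from `_import_structure`.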
| 129 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRU cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for LRU cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU cache storing up to a given capacity of data."""

    # class variable mapping decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for key and mark it as most recently used."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Set the value for key, evicting the least recently used entry if needed."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
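# Illustrative usage of the decorator (sketch):
#   @LRUCache.decorator(100)
#   def fib(num):
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(100)          # intermediate results are memoized
#   fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)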
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """A dataset that reads line-separated source/target files and tokenizes them on the fly."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> dict:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 27 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
SCREAMING_SNAKE_CASE = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
SCREAMING_SNAKE_CASE = field(default=2 , metadata={"help": "Batch size for training."} )
SCREAMING_SNAKE_CASE = field(default=2 , metadata={"help": "Batch size for evaluation."} )
SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"help": "Value of weight decay."} )
SCREAMING_SNAKE_CASE = field(
default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"help": "Learning rate fo training."} )
SCREAMING_SNAKE_CASE = field(default="cosine" , metadata={"help": "Learning rate."} )
SCREAMING_SNAKE_CASE = field(
default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
SCREAMING_SNAKE_CASE = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
SCREAMING_SNAKE_CASE = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
SCREAMING_SNAKE_CASE = field(default=1 , metadata={"help": "Training seed."} )
SCREAMING_SNAKE_CASE = field(
default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
SCREAMING_SNAKE_CASE = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
SCREAMING_SNAKE_CASE = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
SCREAMING_SNAKE_CASE = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Number of workers used for code evaluation."} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Sample from the language model's output distribution."} )
SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
SCREAMING_SNAKE_CASE = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
SCREAMING_SNAKE_CASE = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
SCREAMING_SNAKE_CASE = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
SCREAMING_SNAKE_CASE = field(
default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
SCREAMING_SNAKE_CASE = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
SCREAMING_SNAKE_CASE = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
SCREAMING_SNAKE_CASE = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
SCREAMING_SNAKE_CASE = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
SCREAMING_SNAKE_CASE = field(
default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
SCREAMING_SNAKE_CASE = field(default="content" , metadata={"help": "Column containing text data to process."} )
SCREAMING_SNAKE_CASE = field(
default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
SCREAMING_SNAKE_CASE = field(
default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "If True, near-duplicate samples are removed."} )
SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
SCREAMING_SNAKE_CASE = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
SCREAMING_SNAKE_CASE = field(default="content" , metadata={"help": "Column containing text data to process."} )
SCREAMING_SNAKE_CASE = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
SCREAMING_SNAKE_CASE = field(
default=3_27_68 , metadata={"help": "Number of examples to train the tokenizer on."} )
SCREAMING_SNAKE_CASE = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
SCREAMING_SNAKE_CASE = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
SCREAMING_SNAKE_CASE = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
SCREAMING_SNAKE_CASE = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Push saved tokenizer to the hub."} )
| 637 |
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the range
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the range
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the range
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the range again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
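# Stooge sort runs in O(n^(log 3 / log 1.5)) ≈ O(n^2.71) time; it is included for
# demonstration only and is far slower than practical sorting algorithms.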
| 637 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # Scores are so close that the label order is not guaranteed, hence ANY(str).
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 700 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
__A : str = load_dataset('ashraq/esc50' )
__A : Union[str, Any] = dataset['train']['audio'][-1]['array']
__A : List[str] = audio_classifier(__lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'score': 0.5_0_1, 'label': 'Sound of a dog'}, {'score': 0.4_9_9, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def snake_case__ ( self ):
"""simple docstring"""
__A : Dict = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
    # This is an audio clip of a dog
__A : Optional[Any] = load_dataset('ashraq/esc50' )
__A : List[str] = dataset['train']['audio'][-1]['array']
__A : Union[str, Any] = audio_classifier(__lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
] , )
__A : Any = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
__A : str = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF' )
def snake_case__ ( self ):
"""simple docstring"""
pass
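# A minimal sketch of how zero-shot audio classification scores candidate
# labels: embed the audio clip and each label text with a CLAP-style model,
# then softmax over the cosine similarities. The embeddings below are
# hypothetical stand-ins for the model's audio and text towers.
import numpy as np
def zero_shot_scores(audio_emb, label_embs):
    audio_emb = audio_emb / np.linalg.norm(audio_emb)
    label_embs = label_embs / np.linalg.norm(label_embs, axis=1, keepdims=True)
    sims = label_embs @ audio_emb        # cosine similarity per label
    exp = np.exp(sims - sims.max())      # numerically stable softmax
    return exp / exp.sum()
print(zero_shot_scores(np.array([0.9, 0.1, 0.3]), np.array([[0.8, 0.2, 0.4], [0.1, 0.9, 0.2]])))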
| 540 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__a = input('Enter image url: ').strip()
print(f"Downloading image from {url} ...")
__a = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
__a = soup.find('meta', {'property': 'og:image'})['content']
__a = requests.get(image_url).content
__a = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f"Done. Image saved to disk as {file_name}.")
| 97 |
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
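# The animation above visualizes layer-wise offload. A minimal sketch of the
# underlying mechanism with PyTorch forward hooks, assuming a CUDA device is
# available; Accelerate's real hooks handle many more cases (dtypes, buffers,
# disk offload, etc.).
import torch
from torch import nn
def attach_offload_hooks(layer: nn.Module, device: str = "cuda"):
    def pre_hook(module, args):
        module.to(device)                          # weights CPU -> GPU just in time
        return tuple(a.to(device) for a in args)
    def post_hook(module, args, output):
        module.to("cpu")                           # weights back to CPU after use
        return output
    layer.register_forward_pre_hook(pre_hook)
    layer.register_forward_hook(post_hook)
layer = nn.Linear(4, 4)
attach_offload_hooks(layer)
# layer(torch.randn(2, 4))  # would run on GPU, leaving the weights on CPU afterwards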
| 7 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
    )
| 664 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ):
_lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18}
_lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_frames
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = crop_size
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
            ) , )
| 664 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
__snake_case = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
return (preds == labels).mean()
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
__UpperCamelCase = simple_accuracy(_lowercase , _lowercase )
    __UpperCamelCase = f1_score(y_true=_lowercase , y_pred=_lowercase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _A ( _lowercase , _lowercase ) -> List[str]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
__UpperCamelCase = pearsonr(_lowercase , _lowercase )[0]
__UpperCamelCase = spearmanr(_lowercase , _lowercase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
assert len(_lowercase ) == len(_lowercase ), f'''Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowercase , _lowercase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mrpc":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowercase , _lowercase )
elif task_name == "qqp":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
if len(_lowercase ) != len(_lowercase ):
raise ValueError(f'''Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
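# A minimal sketch of the accuracy / F1 combination computed above, run on toy
# predictions with sklearn directly.
import numpy as np
from sklearn.metrics import f1_score
preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})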
| 1 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def lowercase ( a , a , a , a , a , a , a ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(a )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ :str = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE_ :int = min(a , a )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(a )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ :int = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE_ :Dict = max(a , a )
# after all swaps are performed, send the values back to main
result_pipe[1].send(a )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = []
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
SCREAMING_SNAKE_CASE_ :str = Pipe()
SCREAMING_SNAKE_CASE_ :Optional[Any] = Pipe()
process_array_.append(
Process(
target=a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
SCREAMING_SNAKE_CASE_ :Optional[Any] = temp_rs
SCREAMING_SNAKE_CASE_ :Any = temp_rr
for i in range(1 , len(a ) - 1 ):
SCREAMING_SNAKE_CASE_ :int = Pipe()
SCREAMING_SNAKE_CASE_ :Dict = Pipe()
process_array_.append(
Process(
target=a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = temp_rs
SCREAMING_SNAKE_CASE_ :int = temp_rr
process_array_.append(
Process(
target=a , args=(
len(a ) - 1,
arr[len(a ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(a ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(a ) ):
SCREAMING_SNAKE_CASE_ :Tuple = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*a )
SCREAMING_SNAKE_CASE_ :int = odd_even_transposition(a )
print("Sorted List\n" )
print(*a )
if __name__ == "__main__":
main()
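# A sequential reference version of odd-even transposition sort, handy for
# sanity-checking the multiprocessing pipeline above: n alternating passes of
# neighbor comparisons, starting at even then odd indices.
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))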
| 631 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class UpperCAmelCase ( unittest.TestCase ):
def _A ( self: Optional[Any] ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=__UpperCamelCase , )
assert hasattr(self , '''env''' )
def _A ( self: Dict , __UpperCamelCase: Tuple ):
# configuration for running training on smdistributed Model Parallel
_a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
_a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
_a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
_a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='''py36''' , )
def _A ( self: Optional[Any] , __UpperCamelCase: Optional[Any] ):
TrainingJobAnalytics(__UpperCamelCase ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def _A ( self: Union[str, Any] , __UpperCamelCase: List[str] ):
# create estimator
_a = self.create_estimator(__UpperCamelCase )
# run training
estimator.fit()
# result dataframe
_a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCamelCase )
| 701 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def __snake_case ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ) -> Optional[int]:
if rng is None:
_a = global_rng
_a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self: Tuple , __UpperCamelCase: Dict , __UpperCamelCase: int=7 , __UpperCamelCase: Any=400 , __UpperCamelCase: List[str]=2000 , __UpperCamelCase: Union[str, Any]=2048 , __UpperCamelCase: int=128 , __UpperCamelCase: Optional[int]=1 , __UpperCamelCase: Tuple=512 , __UpperCamelCase: List[Any]=30 , __UpperCamelCase: Dict=4_4100 , ):
_a = parent
_a = batch_size
_a = min_seq_length
_a = max_seq_length
_a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a = spectrogram_length
_a = feature_size
_a = num_audio_channels
_a = hop_length
_a = chunk_length
_a = sampling_rate
def _A ( self: int ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _A ( self: List[Any] , __UpperCamelCase: List[Any]=False , __UpperCamelCase: List[str]=False ):
def _flatten(__UpperCamelCase: Tuple ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
a: Union[str, Any] = TvltFeatureExtractor
def _A ( self: Optional[Any] ):
_a = TvltFeatureExtractionTester(self )
def _A ( self: Optional[Any] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''feature_size''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''hop_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''sampling_rate''' ) )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_a = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(__UpperCamelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(__UpperCamelCase )
_a = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
# Initialize feature_extractor
_a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_a = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a = feature_extractor(
__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=__UpperCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_a = np.asarray(__UpperCamelCase )
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _A ( self: Optional[int] , __UpperCamelCase: Dict ):
_a = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a = ds.sort('''id''' ).select(range(__UpperCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _A ( self: Optional[Any] ):
_a = self._load_datasamples(1 )
_a = TvltFeatureExtractor()
_a = feature_extractor(__UpperCamelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_a = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __UpperCamelCase , atol=1E-4 ) )
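# A minimal sketch of the log-mel extraction a feature extractor like this
# performs, using torchaudio's standard transform; the parameter values are
# illustrative, not TVLT's exact configuration.
import torch
import torchaudio
waveform = torch.randn(1, 44_100)  # one second of fake audio at 44.1 kHz
mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=44_100, n_fft=2_048, hop_length=512, n_mels=128
)(waveform)
log_mel = torch.log(mel + 1e-6)    # log compression, as in most audio frontends
print(log_mel.shape)               # torch.Size([1, 128, num_frames])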
| 346 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 10**-10 ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : str = a
while True:
lowerCAmelCase_ : Optional[int] = Decimal(_A ) - (
Decimal(eval(_A ) ) / Decimal(eval(str(diff(_A ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_A ) ) < precision: # noqa: S307
return float(_A )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 610 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase__ ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , __lowerCamelCase : int = 128 , __lowerCamelCase : int = 256 , __lowerCamelCase : float = 2000.0 , __lowerCamelCase : int = 768 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 2048 , __lowerCamelCase : float = 0.1 , ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
SCREAMING_SNAKE_CASE__ = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(p=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
SCREAMING_SNAKE_CASE__ = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(p=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowercase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
SCREAMING_SNAKE_CASE__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
SCREAMING_SNAKE_CASE__ = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
SCREAMING_SNAKE_CASE__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
SCREAMING_SNAKE_CASE__ = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
SCREAMING_SNAKE_CASE__ = self.position_encoding(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
SCREAMING_SNAKE_CASE__ = self.dropout(__lowerCamelCase )
# decoder: No padding present.
SCREAMING_SNAKE_CASE__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
SCREAMING_SNAKE_CASE__ = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
SCREAMING_SNAKE_CASE__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
SCREAMING_SNAKE_CASE__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
SCREAMING_SNAKE_CASE__ = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = self.decoder_norm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.post_dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.spec_out(__lowerCamelCase )
return spec_out
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=1e-6 ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
SCREAMING_SNAKE_CASE__ = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
SCREAMING_SNAKE_CASE__ = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict:
# pre_self_attention_layer_norm
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
SCREAMING_SNAKE_CASE__ = self.attention(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=None ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ = self.film(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.DenseReluDense(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = NewGELUActivation()
def lowercase_ ( self : List[Any] , __lowerCamelCase : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.act(self.wi_a(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = self.wi_a(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_gelu * hidden_linear
SCREAMING_SNAKE_CASE__ = self.dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.wo(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]=1e-6 ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.ones(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = eps
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
SCREAMING_SNAKE_CASE__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
SCREAMING_SNAKE_CASE__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def lowercase_ ( self : str , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = self.scale_bias(__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = torch.chunk(__lowerCamelCase , 2 , -1 )
SCREAMING_SNAKE_CASE__ = x * (1 + scale) + shift
return x
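# A minimal sketch of the sinusoidal timestep embedding imported above as
# get_timestep_embedding, assuming the usual transformer formulation; the
# library version adds options (downscaling, flipped sin/cos order) omitted here.
import math
import torch
def timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: float = 10_000.0):
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half) / half)
    args = timesteps[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)
print(timestep_embedding(torch.tensor([0, 10]), dim=8).shape)  # torch.Size([2, 8])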
| 493 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a_ : Union[str, Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if rng is None:
SCREAMING_SNAKE_CASE = random.Random()
SCREAMING_SNAKE_CASE = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE = []
for _ in range(SCREAMING_SNAKE_CASE__):
values.append(rng.randint(0 , vocab_size - 1))
SCREAMING_SNAKE_CASE = np.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.intaa).reshape(SCREAMING_SNAKE_CASE__)
return output
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = ids_tensor(SCREAMING_SNAKE_CASE__ , vocab_size=2 , rng=SCREAMING_SNAKE_CASE__)
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE = 1
return attn_mask
@require_flax
class _snake_case :
_lowercase : Optional[int] = None
_lowercase : Tuple = ()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = inputs["""input_ids"""].shape[-1] // 2
SCREAMING_SNAKE_CASE = inputs["""input_ids"""][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE = jnp.ones_like(lowercase__)
SCREAMING_SNAKE_CASE = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE = getattr(lowercase__ , lowercase__)
SCREAMING_SNAKE_CASE = pt_model_class(lowercase__).eval()
SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(lowercase__ , flax_model.params)
SCREAMING_SNAKE_CASE = flax_model.generate(lowercase__).sequences
SCREAMING_SNAKE_CASE = pt_model.generate(torch.tensor(lowercase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 0.8
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = 0.3
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowercase__)
SCREAMING_SNAKE_CASE = model.generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__)
SCREAMING_SNAKE_CASE = jit(model.generate)
SCREAMING_SNAKE_CASE = jit_generate(lowercase__ , attention_mask=lowercase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
SCREAMING_SNAKE_CASE = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
SCREAMING_SNAKE_CASE = """Hello world"""
SCREAMING_SNAKE_CASE = tokenizer(lowercase__ , return_tensors='np').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowercase__ , 'do_samples'):
model.generate(lowercase__ , do_samples=lowercase__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowercase__ , 'foo'):
SCREAMING_SNAKE_CASE = {"""foo""": """bar"""}
model.generate(lowercase__ , **lowercase__)
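# A minimal sketch of the greedy decoding these tests exercise when
# do_sample=False: repeatedly take the argmax of the next-token logits.
# `toy_logits` is a hypothetical stand-in for a model forward pass.
import jax.numpy as jnp
def toy_logits(token_id: int, vocab_size: int = 8):
    return jnp.zeros(vocab_size).at[(token_id + 1) % vocab_size].set(1.0)
def greedy_decode(start_token: int, max_length: int = 5):
    tokens = [start_token]
    for _ in range(max_length - 1):
        tokens.append(int(jnp.argmax(toy_logits(tokens[-1]))))
    return tokens
print(greedy_decode(0))  # [0, 1, 2, 3, 4]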
| 704 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 444 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
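# All five checkpoint sizes share the same layout; only the backbone depths,
# hidden sizes and the auxiliary head width differ between them.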
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    """Produce (old, new) key pairs mapping the original checkpoint onto the HF model."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet + ConvNeXt checkpoint to the HF format."""
    model_name_to_url = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" (batch norm) with "batch_norm" in the checkpoint keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
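# The integration tests below run against the released tourism-monthly checkpoint,
# so they are gated behind @slow.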
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 11 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
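    # Each fast test below swaps a different scheduler into the tiny pipeline and
    # compares a 3x3 corner of the generated image against a reference slice.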
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 709 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
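# For example, sin(90) returns 1.0 and sin(30) returns 0.5 after rounding.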
if __name__ == "__main__":
__import__("doctest").testmod()
| 155 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
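# Same lazy-import pattern as the SpeechT5 __init__ above: fill `_import_structure`,
# then let `_LazyModule` resolve the names on first access.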
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
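# Hamming numbers have the form 2^i * 3^j * 5^k; the first five are [1, 2, 3, 4, 5].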
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 127 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
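# e.g. find_backend("    if not is_torch_available():") returns "torch"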
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 414 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
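# Minimal usage sketch (the document image is hypothetical; PipelineTool instances
# are callable directly):
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=image, question="What is the purchase amount?")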
| 414 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
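# Ids of the English and French language-code tokens in the facebook/m2m100_418M vocabulary.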
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a_ : List[Any] = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : int = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
a_ : Tuple = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowercase_ ( self : List[str] ):
'''simple docstring'''
a_ : Optional[int] = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
a_ : str = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
a_ : Tuple = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(lowercase__ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[12_8022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 12_8006,
} , )
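# A hedged sketch, not the transformers implementation: the `shift_tokens_right`
# call in the tests above builds decoder inputs by rotating the labels one step
# to the right and placing EOS first, which is exactly why the test can assert
# `decoder_input_ids[1][:2] == [2, FR_CODE]`. The token ids below are illustrative.
import torch
def shift_tokens_right_sketch(labels , pad_token_id , decoder_start_token_id ):
    shifted = labels.new_zeros(labels.shape )
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id  # the test passes eos_token_id (2) here
    shifted.masked_fill_(shifted == -100 , pad_token_id )  # never feed ignore indices to the model
    return shifted
labels = torch.tensor([[12_8028, 5364, 5, 2]] )  # [FR_CODE, ..., EOS] -- illustrative ids
assert shift_tokens_right_sketch(labels , 1 , 2 )[0][:2].tolist() == [2, 12_8028]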
| 442 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowerCAmelCase_ : List[str] = 'sshleifer/mar_enro_6_3_student'
class SCREAMING_SNAKE_CASE ( snake_case_ ):
def lowercase_ ( self : int ):
'''simple docstring'''
super().setUp()
a_ : Optional[int] = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowercase__ , )
a_ : List[Any] = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
MarianMTModel.from_pretrained(lowercase__ )
@slow
@require_torch_gpu
def lowercase_ ( self : Tuple ):
'''simple docstring'''
a_ : Union[str, Any] = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
a_ : Optional[int] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
a_ : List[str] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
a_ : Optional[Any] = bash_script.replace(lowercase__ , str(lowercase__ ) )
a_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
a_ : List[str] = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
a_ : Dict = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowercase__ , """argv""" , lowercase__ ):
a_ : List[Any] = argparse.ArgumentParser()
a_ : int = pl.Trainer.add_argparse_args(lowercase__ )
a_ : Dict = SummarizationModule.add_model_specific_args(lowercase__ , os.getcwd() )
a_ : List[str] = parser.parse_args()
a_ : List[Any] = main(lowercase__ )
# Check metrics
a_ : List[str] = load_json(model.metrics_save_path )
a_ : Any = metrics["""val"""][0]
a_ : int = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , lowercase__ )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Dict = os.listdir(lowercase__ )
a_ : Union[str, Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : Optional[Any] = os.path.join(args.output_dir , lowercase__ )
a_ : Dict = torch.load(lowercase__ , map_location="""cpu""" )
a_ : Tuple = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            a_ : int = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class SCREAMING_SNAKE_CASE ( snake_case_ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
a_ : int = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
a_ : Dict = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
a_ : List[str] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
a_ : List[str] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
a_ : Tuple = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
a_ : Union[str, Any] = bash_script.replace(lowercase__ , str(lowercase__ ) )
a_ : List[str] = self.get_auto_remove_tmp_dir()
a_ : List[str] = bash_script.replace("""--fp16""" , """""" )
a_ : str = 6
a_ : Union[str, Any] = (
["""distillation.py"""]
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
"""--gpus=1""",
"""--learning_rate=1e-3""",
F"--num_train_epochs={epochs}",
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowercase__ , """argv""" , lowercase__ ):
a_ : Dict = argparse.ArgumentParser()
a_ : Tuple = pl.Trainer.add_argparse_args(lowercase__ )
a_ : int = SummarizationDistiller.add_model_specific_args(lowercase__ , os.getcwd() )
a_ : List[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
a_ : Union[str, Any] = distill_main(lowercase__ )
# Check metrics
a_ : Union[str, Any] = load_json(model.metrics_save_path )
a_ : List[str] = metrics["""val"""][0]
a_ : Optional[Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , lowercase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Optional[int] = os.listdir(lowercase__ )
a_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : Optional[Any] = os.path.join(args.output_dir , lowercase__ )
a_ : Optional[Any] = torch.load(lowercase__ , map_location="""cpu""" )
a_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            a_ : int = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
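# A hedged sketch of the sys.argv-patching pattern both tests above rely on:
# build the CLI list by hand, patch `sys.argv`, and let the script's own
# argparse wiring read it. The script name and flag are illustrative assumptions.
import argparse
import sys
from unittest.mock import patch
def parse_patched(argv ):
    with patch.object(sys , """argv""" , argv ):
        parser = argparse.ArgumentParser()
        parser.add_argument("""--output_dir""" )
        return parser.parse_args()  # argparse reads the patched sys.argv
assert parse_patched(["""finetune.py""", """--output_dir""", """/tmp/run"""] ).output_dir == "/tmp/run"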
| 442 | 1 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase ( _UpperCamelCase : Any ) -> Optional[int]: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__UpperCAmelCase : Any = [1, 2, 3]
with pytest.raises(_UpperCamelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=2 )
with pytest.raises(_UpperCamelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def lowerCamelCase ( _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[str] = [1, 2]
__UpperCAmelCase : str = {"""a""": 1, """b""": 2}
__UpperCAmelCase : Union[str, Any] = {"""a""": [1, 2], """b""": [3, 4]}
__UpperCAmelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2}
__UpperCAmelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__UpperCAmelCase : List[Any] = [2, 3]
__UpperCAmelCase : Optional[int] = {"""a""": 2, """b""": 3}
__UpperCAmelCase : Any = {"""a""": [2, 3], """b""": [4, 5]}
__UpperCAmelCase : Optional[int] = {"""a""": {"""1""": 2}, """b""": 3}
__UpperCAmelCase : str = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) == expected_map_nested_sa
assert map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) == expected_map_nested_sa
assert map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) == expected_map_nested_sa
assert map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) == expected_map_nested_sa
assert map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) == expected_map_nested_sa
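# A hedged sketch of the `map_nested` contract the assertions above exercise
# (an illustration, not the datasets implementation): the function is applied
# to every leaf while the list/dict structure is preserved.
def map_nested_sketch(fn , data ):
    if isinstance(data , dict ):
        return {k: map_nested_sketch(fn , v ) for k, v in data.items()}
    if isinstance(data , (list, tuple) ):
        return type(data )(map_nested_sketch(fn , v ) for v in data )
    return fn(data )
assert map_nested_sketch(lambda i: i + 1 , {"""a""": [1, 2], """b""": [3, 4]} ) == {"""a""": [2, 3], """b""": [4, 5]}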
| 299 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : str = '▁'
UpperCAmelCase : Optional[int] = {'vocab_file': 'prophetnet.tokenizer'}
UpperCAmelCase : Optional[Any] = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
UpperCAmelCase : List[str] = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
UpperCAmelCase : List[str] = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = collections.OrderedDict()
with open(_UpperCamelCase , """r""" , encoding="""utf-8""" ) as reader:
__UpperCAmelCase : Optional[Any] = reader.readlines()
for index, token in enumerate(_UpperCamelCase ):
__UpperCAmelCase : List[Any] = token.rstrip("""\n""" )
__UpperCAmelCase : Union[str, Any] = index
return vocab
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Union[str, Any]="[SEP]" , UpperCamelCase : int="[SEP]" , UpperCamelCase : int="[UNK]" , UpperCamelCase : Tuple="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Dict="[MASK]" , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Tuple , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
__UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase ) )
__UpperCAmelCase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__UpperCAmelCase : Optional[Any] = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
__UpperCAmelCase : int = f'''[unused{i}]'''
__UpperCAmelCase : Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__UpperCAmelCase : List[str] = 12
__UpperCAmelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCamelCase )
def __getstate__( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.__dict__.copy()
__UpperCAmelCase : Any = None
return state
def __setstate__( self : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Dict = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return ([0] * len(UpperCamelCase )) + [1]
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCamelCase__ ( self : int , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : int = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Dict = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[str] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : str = self.sp_model.PieceToId(UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
return out_string
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , """wb""" ) as fi:
__UpperCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
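# A hedged sketch of the fairseq-offset bookkeeping used above: the special and
# [unused] tokens own the low ids, every SentencePiece id is shifted by a fixed
# offset, and SentencePiece id 0 (its <unk>) folds into the tokenizer-level [UNK].
def spm_id_to_vocab_id_sketch(spm_id , fairseq_offset=12 , unk_id=3 ):
    return spm_id + fairseq_offset if spm_id else unk_id
assert spm_id_to_vocab_id_sketch(3 ) == 15  # the first "real" piece lands at position 15
assert spm_id_to_vocab_id_sketch(0 ) == 3   # SP <unk> maps to [UNK]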
| 299 | 1 |
def binary_recursive( _a):
    decimal = int(_a)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div , mod = divmod(decimal , 2)
    return binary_recursive(div) + str(mod)
def lowerCamelCase__ ( _a):
    number = str(_a).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod() | 25 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self : str , batch_size : int , length : int )-> Optional[Any]:
        """simple docstring"""
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def a ( self : str )-> int:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = 20
UpperCAmelCase_ : Tuple = self._get_uniform_logits(batch_size=2 , length=a_ )
# tweak scores to not be uniform anymore
UpperCAmelCase_ : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase_ : List[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase_ : str = jax.nn.softmax(a_ , axis=-1 )
UpperCAmelCase_ : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ : Any = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(a_ , scores.copy() , cur_len=a_ ) , axis=-1 )
UpperCAmelCase_ : List[str] = jax.nn.softmax(temp_dist_warper_smoother(a_ , scores.copy() , cur_len=a_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : str = 10
UpperCAmelCase_ : List[Any] = 2
# create ramp distribution
UpperCAmelCase_ : Optional[Any] = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase_ : Any = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase_ : List[Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ : Tuple = top_k_warp(a_ , a_ , cur_len=a_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase_ : Any = 5
UpperCAmelCase_ : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase_ : Any = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase_ : Tuple = top_k_warp_safety_check(a_ , a_ , cur_len=a_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a ( self : Dict )-> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = 10
UpperCAmelCase_ : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase_ : int = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase_ : str = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase_ : Union[str, Any] = np.exp(top_p_warp(a_ , a_ , cur_len=a_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase_ : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase_ : Optional[Any] = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase_ : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCAmelCase_ : Any = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase_ : Dict = top_p_warp(a_ , a_ , cur_len=a_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a ( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase_ : Dict = 20
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
# check that min length is applied at length 5
UpperCAmelCase_ : Union[str, Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase_ : str = 5
UpperCAmelCase_ : Tuple = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : List[Any] = min_dist_processor(a_ , a_ , cur_len=a_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase_ : str = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : Optional[Any] = 15
UpperCAmelCase_ : str = min_dist_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def a ( self : List[str] )-> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 20
UpperCAmelCase_ : int = 4
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase_ : List[str] = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : int = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : int = logits_processor(a_ , a_ , cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Tuple = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : Union[str, Any] = logits_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def a ( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 20
UpperCAmelCase_ : int = 4
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Union[str, Any] = 5
UpperCAmelCase_ : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase_ : str = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase_ : int = 4
UpperCAmelCase_ : Optional[Any] = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : Union[str, Any] = logits_processor(a_ , a_ , cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : Tuple = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : int = logits_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def a ( self : int )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[Any] = 10
UpperCAmelCase_ : Tuple = 15
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Dict = 15
# dummy input_ids and scores
UpperCAmelCase_ : Dict = ids_tensor((batch_size, sequence_length) , a_ )
UpperCAmelCase_ : Tuple = input_ids.copy()
UpperCAmelCase_ : int = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : Optional[Any] = scores.copy()
# instantiate all dist processors
UpperCAmelCase_ : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ : str = FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
UpperCAmelCase_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
UpperCAmelCase_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
UpperCAmelCase_ : Tuple = 10
# no processor list
UpperCAmelCase_ : Optional[Any] = temp_dist_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Optional[int] = top_k_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Optional[Any] = top_p_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : List[Any] = min_dist_proc(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Dict = bos_dist_proc(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : str = eos_dist_proc(a_ , a_ , cur_len=a_ )
# with processor list
UpperCAmelCase_ : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ : Union[str, Any] = processor(a_ , a_ , cur_len=a_ )
# scores should be equal
self.assertTrue(jnp.allclose(a_ , a_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a ( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = 4
UpperCAmelCase_ : List[Any] = 10
UpperCAmelCase_ : Optional[int] = 15
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : int = 15
# dummy input_ids and scores
UpperCAmelCase_ : List[str] = ids_tensor((batch_size, sequence_length) , a_ )
UpperCAmelCase_ : Dict = input_ids.copy()
UpperCAmelCase_ : Dict = self._get_uniform_logits(a_ , a_ )
UpperCAmelCase_ : Tuple = scores.copy()
# instantiate all dist processors
UpperCAmelCase_ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ : int = FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
UpperCAmelCase_ : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
UpperCAmelCase_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
UpperCAmelCase_ : Dict = 10
# no processor list
def run_no_processor_list(a_ : Any , a_ : List[str] , a_ : Tuple ):
UpperCAmelCase_ : List[Any] = temp_dist_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Tuple = top_k_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : int = top_p_warp(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Tuple = min_dist_proc(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : Optional[int] = bos_dist_proc(a_ , a_ , cur_len=a_ )
UpperCAmelCase_ : List[Any] = eos_dist_proc(a_ , a_ , cur_len=a_ )
return scores
# with processor list
def run_processor_list(a_ : List[str] , a_ : Optional[int] , a_ : Optional[int] ):
UpperCAmelCase_ : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ : Optional[Any] = processor(a_ , a_ , cur_len=a_ )
return scores
UpperCAmelCase_ : str = jax.jit(a_ )
UpperCAmelCase_ : Tuple = jax.jit(a_ )
UpperCAmelCase_ : Optional[int] = jitted_run_no_processor_list(a_ , a_ , a_ )
UpperCAmelCase_ : List[Any] = jitted_run_processor_list(a_ , a_ , a_ )
# scores should be equal
self.assertTrue(jnp.allclose(a_ , a_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
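# A hedged numpy illustration (not the Flax implementation) of the top-k
# filtering tested above: logits outside the k best are pushed to -inf so they
# receive zero probability after softmax.
def top_k_filter_sketch(scores , top_k , filter_value=-float("""inf""" ) ):
    kth_best = np.sort(scores , axis=-1 )[..., -top_k][..., None]
    return np.where(scores < kth_best , filter_value , scores )
row = np.array([[0.1, 0.5, 0.2, 0.9]] )
filtered = top_k_filter_sketch(row , top_k=2 )
assert np.isinf(filtered[0, 0] ) and np.isinf(filtered[0, 2] )  # only 0.5 and 0.9 survive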
| 470 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
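# A hedged, simplified sketch of the lazy-import idea behind `_LazyModule`
# above (an assumption, not the transformers implementation): a submodule is
# imported only when one of its exported attributes is first requested.
import importlib
import types
class LazyModuleSketch(types.ModuleType ):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        # map every exported symbol back to the submodule that defines it
        self._class_to_module = {s: m for m, symbols in import_structure.items() for s in symbols}
    def __getattr__(self , attr ):
        submodule = importlib.import_module(""".""" + self._class_to_module[attr] , self.__name__ )
        return getattr(submodule , attr )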
| 715 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
        _snake_case = torchvision.models.resnet152(pretrained=True )
_snake_case = list(model.children() )[:-2]
_snake_case = nn.Sequential(*lowerCAmelCase_ )
        _snake_case = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
_snake_case = self.pool(self.model(lowerCAmelCase_ ) )
_snake_case = torch.flatten(lowerCAmelCase_ , start_dim=2 )
_snake_case = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class UpperCamelCase_ ( Dataset ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = [json.loads(lowerCAmelCase_ ) for l in open(lowerCAmelCase_ )]
_snake_case = os.path.dirname(lowerCAmelCase_ )
_snake_case = tokenizer
_snake_case = labels
_snake_case = len(lowerCAmelCase_ )
_snake_case = max_seq_length
_snake_case = transforms
def __len__( self ) -> Any:
return len(self.data )
def __getitem__( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=lowerCAmelCase_ ) )
_snake_case , _snake_case , _snake_case = sentence[0], sentence[1:-1], sentence[-1]
_snake_case = sentence[: self.max_seq_length]
_snake_case = torch.zeros(self.n_classes )
_snake_case = 1
_snake_case = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
_snake_case = self.transforms(lowerCAmelCase_ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> Dict:
'''simple docstring'''
_snake_case = [len(row['sentence'] ) for row in batch]
_snake_case , _snake_case = len(UpperCamelCase__ ), max(UpperCamelCase__ )
_snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ , dtype=torch.long )
_snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(UpperCamelCase__ , UpperCamelCase__ ) ):
_snake_case = input_row['sentence']
_snake_case = 1
_snake_case = torch.stack([row['image'] for row in batch] )
_snake_case = torch.stack([row['label'] for row in batch] )
_snake_case = torch.stack([row['image_start_token'] for row in batch] )
_snake_case = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
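# A hedged sketch of the padding step the collate function above performs:
# allocate a (batch, max_len) zero tensor, copy each sentence in, and record a
# 0/1 mask over the real tokens.
sentences = [torch.tensor([101, 7, 8, 102] ), torch.tensor([101, 9, 102] )]
max_len = max(len(s ) for s in sentences )
text_tensor = torch.zeros(len(sentences ) , max_len , dtype=torch.long )
mask_tensor = torch.zeros(len(sentences ) , max_len , dtype=torch.long )
for i, s in enumerate(sentences ):
    text_tensor[i, : len(s )] = s
    mask_tensor[i, : len(s )] = 1
assert mask_tensor[1].tolist() == [1, 1, 1, 0]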
| 541 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCamelCase__ = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCamelCase__ = '''UperNetConfig'''
class lowerCamelCase_ ( nn.Module ):
def __init__( self : List[Any] , _A : int , _A : int , _A : Union[int, Tuple[int, int]] , _A : Union[int, Tuple[int, int], str] = 0 , _A : bool = False , _A : Union[int, Tuple[int, int]] = 1 , ):
'''simple docstring'''
super().__init__()
        UpperCAmelCase__ : Union[str, Any] = nn.Conv2d(
in_channels=_A , out_channels=_A , kernel_size=_A , padding=_A , bias=_A , dilation=_A , )
        UpperCAmelCase__ : Union[str, Any] = nn.BatchNorm2d(_A )
UpperCAmelCase__ : List[str] = nn.ReLU()
def lowercase_ ( self : int , _A : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.conv(_A )
UpperCAmelCase__ : Union[str, Any] = self.batch_norm(_A )
UpperCAmelCase__ : Tuple = self.activation(_A )
return output
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Tuple , _A : int , _A : int , _A : int ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Optional[int] = [
            nn.AdaptiveAvgPool2d(_A ),
UperNetConvModule(_A , _A , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_A ) , _A )
def lowercase_ ( self : Dict , _A : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = input
for layer in self.layers:
UpperCAmelCase__ : Any = layer(_A )
return hidden_state
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , _A : Tuple[int, ...] , _A : int , _A : int , _A : bool ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Any = pool_scales
UpperCAmelCase__ : Optional[Any] = align_corners
UpperCAmelCase__ : Optional[Any] = in_channels
UpperCAmelCase__ : Tuple = channels
UpperCAmelCase__ : Union[str, Any] = []
for i, pool_scale in enumerate(_A ):
UpperCAmelCase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_A , in_channels=_A , channels=_A )
self.blocks.append(_A )
self.add_module(str(_A ) , _A )
def lowercase_ ( self : int , _A : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
for ppm in self.blocks:
UpperCAmelCase__ : Tuple = ppm(_A )
UpperCAmelCase__ : int = nn.functional.interpolate(
_A , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(_A )
return ppm_outs
class lowerCamelCase_ ( nn.Module ):
def __init__( self : List[str] , _A : Union[str, Any] , _A : Union[str, Any] ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : List[str] = config
UpperCAmelCase__ : int = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCAmelCase__ : int = in_channels
UpperCAmelCase__ : Dict = config.hidden_size
UpperCAmelCase__ : Union[str, Any] = False
        UpperCAmelCase__ : str = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCAmelCase__ : Optional[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCAmelCase__ : Tuple = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCAmelCase__ : int = nn.ModuleList()
UpperCAmelCase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCAmelCase__ : str = UperNetConvModule(_A , self.channels , kernel_size=1 )
UpperCAmelCase__ : Optional[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_A )
self.fpn_convs.append(_A )
UpperCAmelCase__ : List[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.apply(self._init_weights )
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
        if isinstance(_A , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self : Any , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = inputs[-1]
UpperCAmelCase__ : List[str] = [x]
psp_outs.extend(self.psp_modules(_A ) )
UpperCAmelCase__ : Dict = torch.cat(_A , dim=1 )
UpperCAmelCase__ : Tuple = self.bottleneck(_A )
return output
def lowercase_ ( self : str , _A : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_A ) )
# build top-down path
UpperCAmelCase__ : int = len(_A )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase__ : Dict = laterals[i - 1].shape[2:]
UpperCAmelCase__ : List[str] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_A , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCAmelCase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase__ : List[str] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCAmelCase__ : Optional[Any] = torch.cat(_A , dim=1 )
UpperCAmelCase__ : int = self.fpn_bottleneck(_A )
UpperCAmelCase__ : List[str] = self.classifier(_A )
return output
class lowerCamelCase_ ( nn.Module ):
def __init__( self : str , _A : List[Any] , _A : int = 2 , _A : int = 3 , _A : Union[int, Tuple[int, int]] = 1 ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : int = config
UpperCAmelCase__ : Union[str, Any] = config.auxiliary_in_channels
UpperCAmelCase__ : Optional[int] = config.auxiliary_channels
UpperCAmelCase__ : Optional[Any] = config.auxiliary_num_convs
UpperCAmelCase__ : List[str] = config.auxiliary_concat_input
UpperCAmelCase__ : Dict = in_index
UpperCAmelCase__ : Union[str, Any] = (kernel_size // 2) * dilation
UpperCAmelCase__ : Optional[int] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_A , padding=_A , dilation=_A ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_A , padding=_A , dilation=_A ) )
if self.num_convs == 0:
UpperCAmelCase__ : Union[str, Any] = nn.Identity()
else:
UpperCAmelCase__ : str = nn.Sequential(*_A )
if self.concat_input:
UpperCAmelCase__ : List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_A , padding=kernel_size // 2 )
        UpperCAmelCase__ : str = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.apply(self._init_weights )
def lowercase_ ( self : Any , _A : Dict ):
'''simple docstring'''
        if isinstance(_A , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self : List[str] , _A : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = encoder_hidden_states[self.in_index]
UpperCAmelCase__ : List[Any] = self.convs(_A )
if self.concat_input:
UpperCAmelCase__ : Dict = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCAmelCase__ : List[Any] = self.classifier(_A )
return output
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = UperNetConfig
lowerCAmelCase__ = 'pixel_values'
lowerCAmelCase__ = True
def lowercase_ ( self : Union[str, Any] , _A : str ):
'''simple docstring'''
if isinstance(_A , _A ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowercase_ ( self : str ):
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
    def lowercase_ ( self : Optional[int] , module : int , value : Union[str, Any]=False ):
        '''simple docstring'''
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UpperCamelCase__ = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase__ = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , __a , )
class lowerCamelCase_ ( __a ):
def __init__( self : Tuple , _A : List[Any] ):
'''simple docstring'''
super().__init__(_A )
UpperCAmelCase__ : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCAmelCase__ : List[str] = UperNetHead(_A , in_channels=self.backbone.channels )
UpperCAmelCase__ : int = UperNetFCNHead(_A ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_A , config_class=_CONFIG_FOR_DOC )
def lowercase_ ( self : Union[str, Any] , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCAmelCase__ : Union[str, Any] = self.backbone.forward_with_filtered_kwargs(
_A , output_hidden_states=_A , output_attentions=_A )
UpperCAmelCase__ : Optional[Any] = outputs.feature_maps
UpperCAmelCase__ : Optional[Any] = self.decode_head(_A )
UpperCAmelCase__ : Optional[int] = nn.functional.interpolate(_A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_A )
UpperCAmelCase__ : Optional[Any] = None
if self.auxiliary_head is not None:
UpperCAmelCase__ : Dict = self.auxiliary_head(_A )
UpperCAmelCase__ : List[Any] = nn.functional.interpolate(
_A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_A )
UpperCAmelCase__ : Tuple = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCAmelCase__ : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCAmelCase__ : Optional[Any] = loss_fct(_A , _A )
UpperCAmelCase__ : Tuple = loss_fct(_A , _A )
UpperCAmelCase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCAmelCase__ : Optional[Any] = (logits,) + outputs[1:]
else:
UpperCAmelCase__ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_A , logits=_A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
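# A hedged illustration of the final step above: decode-head logits come out
# at feature-map resolution and are bilinearly upsampled back to the input
# size before the per-pixel cross-entropy. The shapes below are assumptions.
logits_example = torch.randn(1 , 19 , 32 , 32 )  # (batch, num_labels, h/16, w/16)
upsampled = nn.functional.interpolate(logits_example , size=(512, 512) , mode='''bilinear''' , align_corners=False )
assert upsampled.shape == (1, 19, 512, 512)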
| 75 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img : np.ndarray , pt1 : np.ndarray , pt2 : np.ndarray , rows : int , cols : int ) -> np.ndarray:
    matrix = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows , img_cols = gray_img.shape
    # set different points to rotate image (the point pairing below is an assumed reconstruction)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
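    # A hedged sketch of what cv2.getAffineTransform solves above: the 2x3
    # matrix M mapping three source points to three destination points,
    # i.e. dst = M @ [x, y, 1].
    src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    A = np.hstack([src, np.ones((3, 1), np.float32)])  # one [x, y, 1] row per point
    M = np.linalg.solve(A, dst).T  # numerically the same 2x3 matrix getAffineTransform computes
    assert np.allclose(M @ np.array([50, 50, 1]), [10, 100], atol=1e-3)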
| 75 | 1 |
from __future__ import annotations
def comp_and_swap( array : list[int] , index_1 : int , index_2 : int , direction : int ) -> None:
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1] , array[index_2] = array[index_2] , array[index_1]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
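    # Hedged usage note: the network above assumes the input length is a power
    # of two (the recursive halving never pads). Quick check on a length-8 list:
    data = [12, 42, -21, 17, 23, 18, 9, -5]
    bitonic_sort(data, 0, len(data), 1)
    assert data == sorted(data)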
| 114 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
'''simple docstring'''
A : Tuple = ['image_processor', 'tokenizer']
A : List[Any] = 'ViltImageProcessor'
A : Optional[Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> int:
snake_case_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _SCREAMING_SNAKE_CASE , )
snake_case_ : str = kwargs.pop("feature_extractor" )
snake_case_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : str = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
snake_case_ : List[Any] = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# add pixel_values + pixel_mask
snake_case_ : str = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
encoding.update(_SCREAMING_SNAKE_CASE )
return encoding
def _lowerCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[Any] = self.tokenizer.model_input_names
snake_case_ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _SCREAMING_SNAKE_CASE , )
return self.image_processor
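A short usage sketch for the processor above; the checkpoint name and file path are illustrative, not pinned by this file:

# Sketch: combine an image and a question into model-ready tensors.
# "dandelin/vilt-b32-finetuned-vqa" is an assumed example checkpoint.
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.open("cats.png")
inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
# inputs holds input_ids / attention_mask from the tokenizer plus
# pixel_values (and pixel_mask) from the image processor.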
| 114 | 1 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):  # n is a multiple of first_term
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 forces a > d, and x > 0 forces a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
return count
if __name__ == "__main__":
print(F"""{solution() = }""") | 125 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}

    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'

    url = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    '''simple docstring'''
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    '''simple docstring'''
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    '''simple docstring'''
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F'''{artifact_name}.zip''')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
return results | 125 | 1 |
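A hedged usage sketch for the helpers above; the artifact name, output directory, and environment variable below are placeholders, not defined by this script:

# Illustrative only: names below are placeholders.
if __name__ == "__main__":
    token = os.environ.get("ACCESS_REPO_INFO_TOKEN")
    reports = get_last_daily_ci_reports(artifact_names=["ci_results"], output_dir="ci_artifacts", token=token)
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))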
"""simple docstring"""
def selection_sort(collection):
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
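Selection sort performs O(n²) comparisons but at most n - 1 swaps, which can matter when swaps are expensive. A quick illustrative check:

# Illustrative checks for selection_sort.
assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert selection_sort([]) == []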
| 51 |
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
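A few illustrative inputs for the validator (the sample numbers are made up):

# Made-up samples; a match needs a 0/94/+94/0094 prefix followed by a 7x mobile code.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0912343221")  # 9x is not a valid mobile prefix here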
| 425 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self) -> None:
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self) -> None:
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
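The save/reload test above relies on deterministic sampling from a seeded generator; the underlying pattern is plain PyTorch. A minimal sketch:

# Sketch: a reseeded generator reproduces the same random draws.
import torch

generator = torch.manual_seed(0)
out_a = torch.randn(2, 2, generator=generator)
generator = generator.manual_seed(0)  # rewind to the same seed
out_b = torch.randn(2, 2, generator=generator)
assert torch.equal(out_a, out_b)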
| 425 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
snake_case = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
snake_case = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
snake_case = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 720 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 488 | 0 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
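The recurrence is Fibonacci's: the last move onto step n is either a single step from n - 1 or a double step from n - 2, so ways(n) = ways(n - 1) + ways(n - 2). Illustrative values:

# climb_stairs(n) for n = 1..5 follows the shifted Fibonacci sequence.
assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]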
| 674 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
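The expected-size arithmetic in get_expected_values mirrors aspect-ratio-preserving resizing to a target shortest edge. A standalone sketch of that rule (the function name is mine, not from the test):

def shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale so the shorter side reaches `shortest_edge`, preserving aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 800))  # (800, 1066), as in the COCO tests above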
| 674 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
        'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'''Task {task} not supported.''')

    print(F'''Building PyTorch model from configuration: {config}''')

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F'''Save tokenizer files to {pytorch_dump_path}''')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
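An illustrative invocation; the script filename and all paths below are placeholders:

# python convert_tapas_checkpoint.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output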
| 380 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = 10
lowercase = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
lowercase = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowerCamelCase__ ) ),
} , features=lowerCamelCase__ , )
return dataset
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowerCamelCase__ )
return filename
# FILE_CONTENT + files
__lowerCamelCase : Dict = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt"""
lowercase = FILE_CONTENT
with open(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ )
return filename
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
import bza
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.bz2"""
lowercase = bytes(lowerCamelCase__ , "utf-8" )
with bza.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
import gzip
lowercase = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
lowercase = bytes(lowerCamelCase__ , "utf-8" )
with gzip.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.lz4"""
lowercase = bytes(lowerCamelCase__ , "utf-8" )
with lza.frame.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowerCamelCase__ , "w" ) as archive:
archive.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
import tarfile
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase__ , "w" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
import lzma
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.xz"""
lowercase = bytes(lowerCamelCase__ , "utf-8" )
with lzma.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
import zipfile
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase = tmp_path_factory.mktemp("data" ) / """file.txt.zst"""
lowercase = bytes(lowerCamelCase__ , "utf-8" )
with zstd.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """file.xml"""
lowercase = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    """simple docstring"""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = datasets.Dataset.from_dict(lowerCamelCase__ )
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowerCamelCase__ ) ) as con:
lowercase = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowerCamelCase__ , "w" , newline="" ) as f:
lowercase = csv.DictWriter(lowerCamelCase__ , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowerCamelCase__ , "w" , newline="" ) as f:
lowercase = csv.DictWriter(lowerCamelCase__ , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
import bza
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.csv.bz2"""
with open(lowerCamelCase__ , "rb" ) as f:
lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase__ , "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
lowercase = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowerCamelCase__ , "wb" ) as f:
lowercase = pq.ParquetWriter(lowerCamelCase__ , schema=lowerCamelCase__ )
lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase__ ) )] for k in DATA[0]} , schema=lowerCamelCase__ )
writer.write_table(lowerCamelCase__ )
writer.close()
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowercase = {"""data""": DATA}
with open(lowerCamelCase__ , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowercase = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase__ , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowerCamelCase__ , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowerCamelCase__ , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowerCamelCase__ , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowerCamelCase__ , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
import gzip
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowerCamelCase__ , "rb" ) as orig_file:
with gzip.open(lowerCamelCase__ , "wb" ) as zipped_file:
zipped_file.writelines(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
import gzip
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowerCamelCase__ , "rb" ) as orig_file:
with gzip.open(lowerCamelCase__ , "wb" ) as zipped_file:
zipped_file.writelines(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("nested" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase__ , "w" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase__ , "w" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.join("nested" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowerCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowerCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.abc"""
with open(lowerCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowerCamelCase__ , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = """\n""".join(["First", "Second\u2029with Unicode new line", "Third"] )
lowercase = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
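A minimal sketch of consuming one of these fixtures from a test module; the test body is illustrative, not part of the fixture file:

# Illustrative test built on the csv_path fixture above.
import datasets

def test_csv_fixture_roundtrip(csv_path):
    ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert ds.num_rows == 4
    assert ds.column_names == ["col_1", "col_2", "col_3"]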
| 310 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'
def _lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : bool ):
lowercase__ : int = FairseqRobertaModel.from_pretrained(lowerCamelCase__ )
roberta.eval() # disable dropout
lowercase__ : Tuple = roberta.model.encoder.sentence_encoder
lowercase__ : Tuple = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowercase__ : Any = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , lowerCamelCase__ )
lowercase__ : List[Any] = XLMRobertaXLForSequenceClassification(lowerCamelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase__ : int = roberta_sent_encoder.embed_tokens.weight
lowercase__ : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
lowercase__ : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase__ : int = roberta_sent_encoder.layer_norm.weight
lowercase__ : List[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase__ : BertLayer = model.roberta.encoder.layer[i]
lowercase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowercase__ : RobertaAttention = layer.attention
lowercase__ : str = roberta_layer.self_attn_layer_norm.weight
lowercase__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
lowercase__ : str = roberta_layer.self_attn.q_proj.bias
lowercase__ : Optional[int] = roberta_layer.self_attn.k_proj.weight
lowercase__ : Optional[int] = roberta_layer.self_attn.k_proj.bias
lowercase__ : int = roberta_layer.self_attn.v_proj.weight
lowercase__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase__ : Any = roberta_layer.self_attn.out_proj.weight
lowercase__ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase__ : Optional[Any] = roberta_layer.final_layer_norm.weight
lowercase__ : Any = roberta_layer.final_layer_norm.bias
# intermediate
lowercase__ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        lowercase__ : Dict = roberta_layer.fc1.weight
        lowercase__ : Any = roberta_layer.fc1.bias
# output
lowercase__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        lowercase__ : Union[str, Any] = roberta_layer.fc2.weight
        lowercase__ : Optional[Any] = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowercase__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
lowercase__ : str = roberta.model.classification_heads["""mnli"""].dense.bias
lowercase__ : str = roberta.model.classification_heads["""mnli"""].out_proj.weight
lowercase__ : List[str] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
lowercase__ : Tuple = roberta.model.encoder.lm_head.dense.weight
lowercase__ : int = roberta.model.encoder.lm_head.dense.bias
lowercase__ : Any = roberta.model.encoder.lm_head.layer_norm.weight
lowercase__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowercase__ : Dict = roberta.model.encoder.lm_head.weight
lowercase__ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase__ : torch.Tensor = roberta.encode(lowerCamelCase__ ).unsqueeze(0 ) # batch of size 1
lowercase__ : Any = model(lowerCamelCase__ )[0]
if classification_head:
lowercase__ : Optional[Any] = roberta.model.classification_heads["""mnli"""](roberta.extract_features(lowerCamelCase__ ) )
else:
lowercase__ : Tuple = roberta.model(lowerCamelCase__ )[0]
print(our_output.shape , their_output.shape )
lowercase__ : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowercase__ : int = torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(lowerCamelCase__ ).mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__snake_case = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 200 | 0 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
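# Illustration (hypothetical caller, not part of this file): with the lazy module in
# place, importing the package stays cheap and heavy backends load on attribute access:
#   from transformers.models import clip   # no torch/tf/flax import happens yet
#   model_cls = clip.CLIPModel             # modeling_clip is imported only here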
| 677 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as f:
        l = []  # noqa: E741
        for _ in range(2_0):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(2_0):
        for j in range(1_7):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(1_7):
        for j in range(2_0):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(1_7):
        for j in range(1_7):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(1_7):
        for j in range(3, 2_0):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
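# Why the loop bounds work (explanatory note, not in the original): a 4-cell window
# starting at index j needs j + 3 <= 19, hence range(1_7); the anti-diagonal scan
# instead starts at j = 3 so that j - 3 stays in bounds.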
if __name__ == "__main__":
print(solution())
| 72 |
'''simple docstring'''
from torch import nn
class A ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ ) -> List[Any]:
super().__init__()
_a = class_size
_a = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_a = nn.Linear(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
_a = self.mlp(snake_case_ )
return logits
| 131 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase: Union[str, Any] = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: int = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[int] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
UpperCAmelCase: Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 707 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase: str = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings
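# For reference (illustrative line, not from a real run), a pytest warning entry
# these targets match looks like:
#   src/transformers/foo.py:12: DeprecationWarning: `bar` is deprecated
# i.e. the `f": {x}: "` test keys on the ": <WarningClass>: " separator.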
def extract_warnings(artifact_dir, targets):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
UpperCAmelCase: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
UpperCAmelCase: Any = parser.parse_args()
UpperCAmelCase: Any = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase: str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase: str = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase: Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 600 | 0 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
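    # Minimal usage sketch (inputs assumed, not part of the original module):
    # the midpoint split recurses on both halves and keeps the larger maximum.
    print(find_max([2, 7, 1, 9, 4], 0, 4))  # expected: 9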
| 5 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
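    # Sanity-check sketch (assumes nothing beyond Python's builtin three-argument
    # pow, which computes the same modular power):
    assert _modexpt(3, 10, 10**8) == pow(3, 10, 10**8)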
| 9 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = 'nat'
__lowerCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self :Optional[int] , _lowercase :Optional[Any]=4 , _lowercase :List[Any]=3 , _lowercase :List[Any]=64 , _lowercase :Any=[3, 4, 6, 5] , _lowercase :List[str]=[2, 4, 8, 16] , _lowercase :List[Any]=7 , _lowercase :Optional[int]=3.0 , _lowercase :str=True , _lowercase :List[str]=0.0 , _lowercase :str=0.0 , _lowercase :Tuple=0.1 , _lowercase :int="gelu" , _lowercase :Union[str, Any]=0.02 , _lowercase :List[Any]=1e-5 , _lowercase :Any=0.0 , _lowercase :int=None , _lowercase :int=None , **_lowercase :int , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(_lowercase )
lowercase__ = num_heads
lowercase__ = kernel_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
lowercase__ = layer_scale_init_value
lowercase__ = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(_lowercase ) + 1 )]
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 611 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _A ( __magic_name__ ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase__ = model_type_to_module_name(__magic_name__ )
lowercase__ = importlib.import_module(f'''.{module_name}''' , "transformers.models" )
try:
return getattr(__magic_name__ , __magic_name__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__magic_name__ , "__name__" , __magic_name__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase__ = importlib.import_module("transformers" )
if hasattr(__magic_name__ , __magic_name__ ):
return getattr(__magic_name__ , __magic_name__ )
return None
def _A ( __magic_name__ , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , **__magic_name__ , ):
lowercase__ = get_file_from_repo(
__magic_name__ , __magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , resume_download=__magic_name__ , proxies=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , local_files_only=__magic_name__ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(__magic_name__ , encoding="utf-8" ) as reader:
return json.load(__magic_name__ )
class lowerCAmelCase :
def __init__( self :List[Any] ):
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(_lowercase )
def UpperCAmelCase ( cls :Tuple , _lowercase :Any , **_lowercase :Union[str, Any] ):
'''simple docstring'''
lowercase__ = kwargs.pop("config" , _lowercase )
lowercase__ = kwargs.pop("trust_remote_code" , _lowercase )
lowercase__ = True
lowercase__ , lowercase__ = ImageProcessingMixin.get_image_processor_dict(_lowercase , **_lowercase )
lowercase__ = config_dict.get("image_processor_type" , _lowercase )
lowercase__ = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
lowercase__ = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase__ = config_dict.pop("feature_extractor_type" , _lowercase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
lowercase__ = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowercase__ = config_dict["auto_map"]["AutoFeatureExtractor"]
lowercase__ = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowercase , _lowercase ):
lowercase__ = AutoConfig.from_pretrained(_lowercase , **_lowercase )
# It could be in `config.image_processor_type``
lowercase__ = getattr(_lowercase , "image_processor_type" , _lowercase )
if hasattr(_lowercase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
lowercase__ = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
lowercase__ = image_processor_class_from_name(_lowercase )
lowercase__ = image_processor_auto_map is not None
lowercase__ = image_processor_class is not None or type(_lowercase ) in IMAGE_PROCESSOR_MAPPING
lowercase__ = resolve_trust_remote_code(
_lowercase , _lowercase , _lowercase , _lowercase )
if has_remote_code and trust_remote_code:
lowercase__ = get_class_from_dynamic_module(
_lowercase , _lowercase , **_lowercase )
lowercase__ = kwargs.pop("code_revision" , _lowercase )
if os.path.isdir(_lowercase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowercase , **_lowercase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowercase , **_lowercase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowercase ) in IMAGE_PROCESSOR_MAPPING:
lowercase__ = IMAGE_PROCESSOR_MAPPING[type(_lowercase )]
return image_processor_class.from_dict(_lowercase , **_lowercase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCAmelCase ( _lowercase :Optional[int] , _lowercase :Dict ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_lowercase , _lowercase )
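# Typical usage (illustrative; the checkpoint name is an example, not from this file):
# the hub checkpoint's config decides which concrete processor class is instantiated.
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")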
| 611 | 1 |
'''simple docstring'''
import numpy as np
class Cell:
    """simple docstring"""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)
class Gridworld:
    """simple docstring"""

    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neigbours(self, cell):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    '''simple docstring'''
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
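    # Design note (added): h above is the *squared* Euclidean distance to the goal,
    # which overestimates the true cost on a unit-cost 8-connected grid, so the
    # returned path is not guaranteed shortest; Chebyshev distance would keep A*
    # admissible here.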
| 664 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """bert"""
def __init__( self :Any , lowerCamelCase_ :List[Any]=3_05_22 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :Tuple=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :int=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 698 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'vocab_file': 'vocab.txt'}
lowercase_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
lowercase_ = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
lowercase_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ConvBertTokenizer
def __init__( self: str , a: Any=None , a: Optional[int]=None , a: Tuple=True , a: int="[UNK]" , a: Dict="[SEP]" , a: int="[PAD]" , a: List[Any]="[CLS]" , a: List[Any]="[MASK]" , a: Optional[int]=True , a: int=None , **a: int , ):
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
__lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
__lowerCamelCase : List[Any] = getattr(a , normalizer_state.pop('type' ) )
__lowerCamelCase : Dict = do_lower_case
__lowerCamelCase : List[str] = strip_accents
__lowerCamelCase : Dict = tokenize_chinese_chars
__lowerCamelCase : Optional[Any] = normalizer_class(**a )
__lowerCamelCase : Union[str, Any] = do_lower_case
def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Optional[int]=None ):
__lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self: Any , a: List[int] , a: Optional[List[int]] = None ):
__lowerCamelCase : int = [self.sep_token_id]
__lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self: List[Any] , a: str , a: Optional[str] = None ):
__lowerCamelCase : str = self._tokenizer.model.save(a , name=a )
return tuple(a ) | 718 |
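# Usage sketch (illustrative, not part of the original file):
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("hello world")["input_ids"]  # starts with [CLS], ends with [SEP]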
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """naver-clova-ix/donut-base-finetuned-docvqa"""
__snake_case = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__snake_case = """document_qa"""
__snake_case = AutoProcessor
__snake_case = VisionEncoderDecoderModel
__snake_case = ["""image""", """text"""]
__snake_case = ["""text"""]
def __init__( self: Dict , *a: List[Any] , **a: List[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*a , **a )
def _snake_case ( self: str , a: "Image" , a: str ):
__lowerCamelCase : str = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__lowerCamelCase : Dict = task_prompt.replace('{user_input}' , a )
__lowerCamelCase : Optional[Any] = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors='pt' ).input_ids
__lowerCamelCase : Union[str, Any] = self.pre_processor(a , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _snake_case ( self: Optional[Any] , a: Tuple ):
return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=a,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=a,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=a,
        ).sequences
def _snake_case ( self: Optional[Any] , a: Any ):
__lowerCamelCase : Union[str, Any] = self.pre_processor.batch_decode(a )[0]
__lowerCamelCase : List[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
__lowerCamelCase : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
__lowerCamelCase : Optional[int] = re.sub(R'<.*?>' , '' , a , count=1 ).strip() # remove first task start token
        __lowerCamelCase : int = self.pre_processor.token2json(a )
return sequence["answer"]
| 230 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCAmelCase = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 585 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase (self ) -> Optional[Any]:
torch.manual_seed(0 )
_snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowercase (self ) -> Dict:
_snake_case = self.dummy_uncond_unet
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" ).images
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCAmelCase )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
_snake_case = """google/ddpm-cifar10-32"""
_snake_case = UNetaDModel.from_pretrained(UpperCAmelCase )
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , output_type="""numpy""" ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 585 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[Any] , a: Tuple , a: Any=13 , a: Dict=7 , a: Optional[Any]=True , a: Any=True , a: Optional[Any]=True , a: Any=True , a: Optional[int]=99 , a: Tuple=32 , a: int=5 , a: Optional[int]=4 , a: Optional[int]=37 , a: List[str]="gelu" , a: Optional[int]=0.1 , a: List[str]=0.1 , a: Optional[int]=512 , a: Any=16 , a: Optional[int]=2 , a: Union[str, Any]=0.0_2 , a: Union[str, Any]=4 , ):
__lowerCamelCase : Dict = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : int = seq_length
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : Optional[int] = use_attention_mask
__lowerCamelCase : Union[str, Any] = use_token_type_ids
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : Dict = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : Dict = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : List[Any] = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : List[str] = type_vocab_size
__lowerCamelCase : Optional[int] = type_sequence_label_size
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Any = num_choices
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
__lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase : Dict = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=a,
            initializer_range=self.initializer_range,
        )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
__lowerCamelCase : Union[str, Any] = config_and_inputs
__lowerCamelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = FlaxAlbertModelTester(self )
@slow
def _snake_case ( self: Tuple ):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCamelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Any = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowerCamelCase : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCamelCase : Tuple = model(a , attention_mask=a )[0]
__lowerCamelCase : List[str] = (1, 11, 768)
self.assertEqual(output.shape , a )
__lowerCamelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 702 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
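    # Minimal usage sketch (inputs assumed): both variants agree on integer points.
    print(manhattan_distance([1, 1], [9, 9]))            # expected: 16.0
    print(manhattan_distance_one_liner([1, 1], [9, 9]))  # expected: 16.0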
| 230 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase__ ( unittest.TestCase ):
__UpperCAmelCase = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase = ['''accelerate''', '''launch''']
__UpperCAmelCase = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase = '''default_config.yaml'''
__UpperCAmelCase = config_folder / config_file
__UpperCAmelCase = config_folder / '''_default_config.yaml'''
__UpperCAmelCase = Path('''tests/test_configs''' )
@classmethod
def UpperCamelCase_ ( cls) -> List[str]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def UpperCamelCase_ ( cls) -> Tuple:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())
def UpperCamelCase_ ( self) -> int:
for config in sorted(self.test_config_path.glob("""**/*.yaml""")):
with self.subTest(config_file=SCREAMING_SNAKE_CASE):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(SCREAMING_SNAKE_CASE), self.test_file_path] , env=os.environ.copy())
def UpperCamelCase_ ( self) -> Any:
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy())
class lowercase__ ( unittest.TestCase ):
__UpperCAmelCase = '''test-tpu'''
__UpperCAmelCase = '''us-central1-a'''
__UpperCAmelCase = '''ls'''
__UpperCAmelCase = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase = '''cd /usr/share'''
__UpperCAmelCase = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase = '''Running gcloud compute tpus tpu-vm ssh'''
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Union[str, Any] = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : int = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Optional[int] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Optional[int] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Union[str, Any] = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Optional[int] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : List[str] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Union[str, Any] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
| 88 |
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( number : int ) -> int:
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
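# Illustrative sanity checks (a sketch: assumes the DP above computes the minimal
# perfect-square decomposition; by Lagrange's four-square theorem every result is <= 4):
#   _lowerCAmelCase(12)  # -> 3, since 12 = 4 + 4 + 4
#   _lowerCAmelCase(13)  # -> 2, since 13 = 4 + 9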
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[str]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[Any]=512 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Any=None , ):
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = True
_A = 99
_A = 384
_A = 2
_A = 4
_A = 37
_A = 'gelu'
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = 128
_A = 2
_A = 9
_A = 1
_A = None
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ):
_A = TFConvBertModel(config=_UpperCAmelCase )
_A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_A = [input_ids, input_mask]
_A = model(_UpperCAmelCase )
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ):
_A = TFConvBertForMaskedLM(config=_UpperCAmelCase )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ):
_A = self.num_labels
_A = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ):
_A = self.num_choices
_A = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
_A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ):
_A = self.num_labels
_A = TFConvBertForTokenClassification(config=_UpperCAmelCase )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ):
_A = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Dict ):
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = TFConvBertModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
_A = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
_A = True
_A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = model_class(_UpperCAmelCase )
_A = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
_A = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
_A = tf.keras.models.load_model(_UpperCAmelCase )
_A = model(_UpperCAmelCase )
if self.is_encoder_decoder:
_A = outputs['encoder_hidden_states']
_A = outputs['encoder_attentions']
else:
_A = outputs['hidden_states']
_A = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_A = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase_ ( self : int ):
_A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
_A = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
_A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
_A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : Tuple ):
_A = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
_A = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Tuple ):
_A = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_A = True
_A = False
_A = model_class(_UpperCAmelCase )
_A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_A = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
_A = model_class(_UpperCAmelCase )
_A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_A = True
_A = model_class(_UpperCAmelCase )
_A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
_A = True
_A = True
_A = model_class(_UpperCAmelCase )
_A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(_UpperCAmelCase )[0]
_A = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase )
_A = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 717 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
'''simple docstring'''
    def __init__( self , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__( self , other ):
        if not isinstance(other , Conversation ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input( self , text : str , overwrite : bool = False ):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self , response : str ):
        self.generated_responses.append(response )
    def iter_texts( self ):
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__( self ):
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'''{name} >> {text} \n'''
        return output
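# Usage sketch for the Conversation container above (hedged: the pipeline instance is
# assumed to come from `pipeline("conversational")`, which is not shown in this file):
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = conversational_pipeline(conversation)   # hypothetical pipeline instance
#   conversation.add_user_input("Is it an action movie?")
#   conversation = conversational_pipeline(conversation)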
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline ( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations : Union[Conversation, List[Conversation]] , num_workers=0 , **kwargs ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation : Conversation , min_length_for_response=32 ):
        if not isinstance(conversation , Conversation ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length )
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation' )
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation : Conversation ):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 505 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""cls_token""": """<s>"""}
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case : List[Any] = dict(zip(A , range(len(A ) ) ) )
snake_case : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case : Any = {"""unk_token""": """<unk>"""}
snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case : Optional[int] = """lower newer"""
snake_case : Tuple = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
snake_case : Tuple = tokens + [tokenizer.unk_token]
snake_case : str = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self ) -> str:
snake_case : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def UpperCAmelCase ( self ) -> Dict:
snake_case : str = self.tokenizer_class.from_pretrained("""roberta-base""" )
snake_case : int = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
snake_case : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
snake_case : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
snake_case : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
snake_case : Dict = tokenizer.build_inputs_with_special_tokens(A )
snake_case : str = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[str] = self.get_tokenizer()
snake_case : Any = """Encode this sequence."""
snake_case : Any = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case : Any = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
snake_case : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case : str = tokenizer.encode(A , add_special_tokens=A )
snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
snake_case : int = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
snake_case : Dict = tokenizer.convert_tokens_to_ids(A )
snake_case : Optional[Any] = """Encode <mask> sequence"""
snake_case : List[Any] = """Encode <mask>sequence"""
snake_case : Tuple = tokenizer.encode(A )
snake_case : int = encoded.index(A )
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
snake_case : Tuple = tokenizer.encode(A )
snake_case : Tuple = encoded.index(A )
snake_case : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(A , **A )
snake_case : Tuple = self.tokenizer_class.from_pretrained(A , **A )
snake_case : List[Any] = """A, <mask> AllenNLP sentence."""
snake_case : Union[str, Any] = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
snake_case : Optional[int] = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCAmelCase ( self ) -> List[str]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def UpperCAmelCase ( self ) -> List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : str = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case : Optional[Any] = f"""{text_of_1_token} {text_of_1_token}"""
snake_case : str = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Tuple = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Optional[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
snake_case : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
snake_case : Any = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 587 |
def SCREAMING_SNAKE_CASE__ ( graph ) -> bool:
    visited : set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk : set[int] = set()
    return any(
        node not in visited and depth_first_search(graph ,node ,visited ,rec_stk )
        for node in graph )
def depth_first_search ( graph ,vertex ,visited ,rec_stk ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph ,node ,visited ,rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
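# Minimal usage sketch (hypothetical adjacency-list graph): 0 -> 1 -> 2 -> 0 closes a
# back edge, so the checker above should report a cycle.
#   example_graph = {0: [1], 1: [2], 2: [0], 3: []}
#   SCREAMING_SNAKE_CASE__(example_graph)  # -> True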
if __name__ == "__main__":
from doctest import testmod
testmod()
| 587 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    benchmark.run()
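# Typical invocation sketch (hedged: the script name is assumed, and the flag names come
# from TensorFlowBenchmarkArguments, which may vary across transformers versions):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128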
if __name__ == "__main__":
main()
| 458 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
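# Design note: the helper above implements weight tying: the returned linear layer
# shares its weight tensor with the token-embedding matrix, so the output projection
# reuses the decoder embeddings instead of learning a separate matrix.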
def _download( url , root="." ):
    """simple docstring"""
    # NOTE: the default download root is an assumption; the call site below passes only the URL.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
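    # Example invocation sketch (hedged: the script filename is assumed; checkpoint
    # names come from the _MODELS table above):
    #   python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny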
| 458 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A = """facebook/wmt19-en-de"""
A = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
A = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
A = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
A = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
A = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
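# Downstream usage sketch (hedged: assumes the upload above succeeded):
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")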
| 77 | '''simple docstring'''
import numpy as np
def UpperCamelCase__ ( vector : np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
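# Numerical sketch: the logistic sigmoid maps any real input into (0, 1) element-wise.
#   UpperCamelCase__(np.array([-1.0, 0.0, 1.0]))  # -> approx. [0.269, 0.5, 0.731]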
if __name__ == "__main__":
import doctest
doctest.testmod() | 523 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCamelCase : Any = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_UpperCamelCase : Any = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_UpperCamelCase : Any = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds , labels ):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SuperGlue( datasets.Metric):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 709 | """simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
    def __init__( self ):
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append('on_init_end' )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append('on_train_begin' )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append('on_train_end' )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_begin' )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_end' )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append('on_step_begin' )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append('on_step_end' )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append('on_evaluate' )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append('on_predict' )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append('on_save' )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append('on_log' )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append('on_prediction_step' )
@require_torch
class TrainerCallbackTest ( unittest.TestCase):
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=6_4 , eval_len=6_4 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs_a , cbs_b ):
        self.assertEqual(len(cbs_a ) , len(cbs_b ) )
        # Order doesn't matter
        cbs_a = sorted(cbs_a , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs_b = sorted(cbs_b , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cba, cbb in zip(cbs_a , cbs_b ):
            if isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba , cbb )
            elif isinstance(cba , type ) and not isinstance(cbb , type ):
                self.assertEqual(cba , cbb.__class__ )
            elif not isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba.__class__ , cbb )
            else:
                self.assertEqual(cba , cbb )
    def get_expected_events( self , trainer ):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log' )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save' )
            expected_events.append('on_epoch_end' )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cba = trainer.callback_handler.callbacks[0]
        cbb = trainer.pop_callback(cba )
        self.assertEqual(cba , cbb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cba )
        expected_callbacks.insert(0 , cba )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
        # XXX: for now, ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
        # A warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
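# --- Hedged sketch (not part of the original tests): the event-recording callback
# pattern these tests rely on. Assumption: this mirrors MyTestTrainerCallback in
# spirit, not necessarily line for line - each TrainerCallback hook simply appends
# its own event name so a test can compare the recorded trace against the expected one.
from transformers import TrainerCallback


class EventRecorderCallback(TrainerCallback):
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")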
| 645 | 0 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``n`` using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = prime candidate, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
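# Hedged sanity check (not in the original): the primes below 10 are 2, 3, 5 and 7.
assert solution(10) == 17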
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the length A(divisor) of the smallest repunit divisible by ``divisor`` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor coprime to 10 whose A-value first exceeds ``limit`` (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
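# Hedged sanity check (not in the original): 111_111 = 7 * 15_873, so A(7) == 6.
assert least_divisible_repunit(7) == 6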
if __name__ == "__main__":
print(F'{solution() = }')
| 572 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = KandinskyVaaPriorPipeline
UpperCamelCase__ = ["""prompt"""]
UpperCamelCase__ = ["""prompt""", """negative_prompt"""]
UpperCamelCase__ = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase__ = False
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return 100
@property
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowerCamelCase )
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: Any = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
UpperCamelCase__: Optional[int] = PriorTransformer(**__lowerCamelCase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
UpperCamelCase__: List[str] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
UpperCamelCase__: Any = CLIPVisionModelWithProjection(__lowerCamelCase )
return model
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = self.dummy_prior
UpperCamelCase__: List[str] = self.dummy_image_encoder
UpperCamelCase__: Union[str, Any] = self.dummy_text_encoder
UpperCamelCase__: Tuple = self.dummy_tokenizer
UpperCamelCase__: List[str] = self.dummy_image_processor
UpperCamelCase__: Tuple = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__lowerCamelCase , clip_sample_range=10.0 , )
UpperCamelCase__: List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict=0 ):
'''simple docstring'''
if str(__lowerCamelCase ).startswith("mps" ):
UpperCamelCase__: Dict = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase__: str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase__: Tuple = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Dict = "cpu"
UpperCamelCase__: Dict = self.get_dummy_components()
UpperCamelCase__: int = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase__: Dict = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__: List[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase__: Dict = output.image_embeds
UpperCamelCase__: str = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
UpperCamelCase__: int = image[0, -10:]
UpperCamelCase__: Dict = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
UpperCamelCase__: Union[str, Any] = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: List[str] = torch_device == "cpu"
UpperCamelCase__: Optional[Any] = True
UpperCamelCase__: Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
@skip_mps
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Any = torch_device == "cpu"
UpperCamelCase__: List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
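# --- Hedged sketch (not part of the original tests): the device-aware seeding
# pattern used in get_dummy_inputs above. MPS does not support device-local
# torch.Generator objects, so the code falls back to seeding the global CPU RNG.
def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (re-seeded) global CPU generator
    return torch.Generator(device=device).manual_seed(seed)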
| 221 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=7 , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: int=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[Any]=99 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=4 , __lowerCamelCase: Any=37 , __lowerCamelCase: List[str]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=512 , __lowerCamelCase: Union[str, Any]=16 , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Any=3 , __lowerCamelCase: Any=4 , __lowerCamelCase: str=None , ):
'''simple docstring'''
UpperCamelCase__: List[Any] = parent
UpperCamelCase__: Union[str, Any] = 13
UpperCamelCase__: int = 7
UpperCamelCase__: int = True
UpperCamelCase__: int = True
UpperCamelCase__: Union[str, Any] = True
UpperCamelCase__: str = True
UpperCamelCase__: Optional[Any] = 99
UpperCamelCase__: str = 384
UpperCamelCase__: Dict = 2
UpperCamelCase__: Optional[Any] = 4
UpperCamelCase__: Union[str, Any] = 37
UpperCamelCase__: str = "gelu"
UpperCamelCase__: Union[str, Any] = 0.1
UpperCamelCase__: Union[str, Any] = 0.1
UpperCamelCase__: List[Any] = 512
UpperCamelCase__: Dict = 16
UpperCamelCase__: Union[str, Any] = 2
UpperCamelCase__: Optional[Any] = 0.02
UpperCamelCase__: Optional[int] = 3
UpperCamelCase__: Optional[Any] = 4
UpperCamelCase__: int = 128
UpperCamelCase__: Union[str, Any] = 2
UpperCamelCase__: Optional[int] = 9
UpperCamelCase__: Any = 1
UpperCamelCase__: Optional[Any] = None
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__: Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase__: int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__: str = None
if self.use_token_type_ids:
UpperCamelCase__: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__: str = None
UpperCamelCase__: str = None
UpperCamelCase__: Union[str, Any] = None
if self.use_labels:
UpperCamelCase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__: List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__: Tuple = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: str = TFConvBertModel(config=__lowerCamelCase )
UpperCamelCase__: Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase__: str = [input_ids, input_mask]
UpperCamelCase__: str = model(__lowerCamelCase )
UpperCamelCase__: Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = TFConvBertForMaskedLM(config=__lowerCamelCase )
UpperCamelCase__: Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: str = self.num_labels
UpperCamelCase__: Any = TFConvBertForSequenceClassification(config=__lowerCamelCase )
UpperCamelCase__: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.num_choices
UpperCamelCase__: Dict = TFConvBertForMultipleChoice(config=__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Tuple = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Dict = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCamelCase__: List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Tuple ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.num_labels
UpperCamelCase__: str = TFConvBertForTokenClassification(config=__lowerCamelCase )
UpperCamelCase__: Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
UpperCamelCase__: List[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: int = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
): List[Any] = config_and_inputs
UpperCamelCase__: Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Dict = TFConvBertModelTester(self )
UpperCamelCase__: Any = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: str = True
UpperCamelCase__: Union[str, Any] = True
if hasattr(__lowerCamelCase , "use_cache" ):
UpperCamelCase__: int = True
UpperCamelCase__: List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCamelCase__: Optional[Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
UpperCamelCase__: List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase__: List[str] = model_class(__lowerCamelCase )
UpperCamelCase__: List[str] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
UpperCamelCase__: str = os.path.join(__lowerCamelCase , "saved_model" , "1" )
UpperCamelCase__: Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
UpperCamelCase__: Any = model(__lowerCamelCase )
if self.is_encoder_decoder:
UpperCamelCase__: int = outputs["encoder_hidden_states"]
UpperCamelCase__: str = outputs["encoder_attentions"]
else:
UpperCamelCase__: str = outputs["hidden_states"]
UpperCamelCase__: Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
UpperCamelCase__: Optional[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: Tuple = True
UpperCamelCase__: int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCamelCase__: Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCamelCase__: Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
UpperCamelCase__: Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase: List[Any] ):
UpperCamelCase__: Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__: Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase: List[str] ):
UpperCamelCase__: str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__: int = True
UpperCamelCase__: Tuple = False
UpperCamelCase__: Tuple = model_class(__lowerCamelCase )
UpperCamelCase__: List[Any] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCamelCase__: Optional[int] = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
UpperCamelCase__: List[str] = model_class(__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__: List[Any] = True
UpperCamelCase__: Tuple = model_class(__lowerCamelCase )
UpperCamelCase__: int = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
UpperCamelCase__: List[Any] = True
UpperCamelCase__: Any = True
UpperCamelCase__: int = model_class(__lowerCamelCase )
UpperCamelCase__: Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCamelCase__: Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__: Dict = model(__lowerCamelCase )[0]
UpperCamelCase__: Tuple = [1, 6, 768]
self.assertEqual(output.shape , __lowerCamelCase )
UpperCamelCase__: Dict = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
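# --- Hedged sketch (not part of the original tests): the multiple-choice input
# expansion used above. Each example is repeated once per answer choice, turning
# (batch, seq) into (batch, num_choices, seq) before the forward pass. Shapes below
# are illustrative only.
if is_tf_available():
    _ids = tf.zeros((2, 5), dtype=tf.int32)  # (batch, seq)
    _expanded = tf.tile(tf.expand_dims(_ids, 1), (1, 4, 1))  # 4 answer choices
    assert _expanded.shape == (2, 4, 5)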
| 221 | 1 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from ``word_bank``."""
    word_bank = word_bank or []
    # create a table: table[i] holds all ways to build target[:i]
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value: the empty string has exactly one (empty) combination
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size):
        # only positions that are already reachable can be extended
        if table[i] != []:
            for word in word_bank:
                # slice condition: does `word` start at position i?
                if target[i : i + len(word)] == word:
                    # add the word to every combination the current position holds,
                    # then push those combinations to table[i + len(word)]
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    table[i + len(word)] += new_combinations
    # combinations are built in reverse order, so reverse each for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
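# Hedged sanity check (not in the original): "purple" decomposes exactly two ways,
# "purp" + "le" and "p" + "ur" + "p" + "le".
assert len(all_construct("purple", ["purp", "p", "ur", "le", "purpl"])) == 2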
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 193 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 88 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( __lowerCamelCase ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """ChineseCLIPImageProcessor"""
snake_case_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[int] ,A : int=None ,A : str=None ,**A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,A ,)
UpperCAmelCase__ : Any = kwargs.pop("""feature_extractor""" )
UpperCAmelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A ,A )
UpperCAmelCase__ : Union[str, Any] = self.image_processor
def __call__( self : Optional[Any] ,A : Union[str, Any]=None ,A : Tuple=None ,A : Any=None ,**A : int ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase__ : Any = self.tokenizer(A ,return_tensors=A ,**A )
if images is not None:
UpperCAmelCase__ : str = self.image_processor(A ,return_tensors=A ,**A )
if text is not None and images is not None:
UpperCAmelCase__ : Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def __lowercase ( self : Union[str, Any] ,*A : Tuple ,**A : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A ,**A )
def __lowercase ( self : List[str] ,*A : Tuple ,**A : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*A ,**A )
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer.model_input_names
UpperCAmelCase__ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,)
return self.image_processor_class
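# --- Hedged usage sketch (not part of the original file): how a combined
# text/image processor like the one above is typically driven. Assumption:
# "OFA-Sys/chinese-clip-vit-base-patch16" is an illustrative checkpoint id and
# `pil_image` is a PIL.Image you already have; kept as comments to avoid a
# network download at import time.
#
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# # inputs now carries input_ids / attention_mask from the tokenizer and
# # pixel_values from the image processor, ready for the ChineseCLIP model.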
| 194 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
snake_case_ = """timm_backbone"""
def __init__( self : List[str] ,A : Any=None ,A : List[Any]=3 ,A : Any=True ,A : Union[str, Any]=True ,A : List[Any]=None ,**A : Optional[int] ,):
'''simple docstring'''
super().__init__(**A )
UpperCAmelCase__ : Optional[int] = backbone
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : Optional[int] = features_only
UpperCAmelCase__ : Tuple = use_pretrained_backbone
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[str] = out_indices if out_indices is not None else (-1,)
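# --- Hedged instantiation sketch (not part of the original file). Assumption: the
# class above corresponds to transformers' TimmBackboneConfig (model_type
# "timm_backbone"), and "resnet50" is a typical timm model id for the `backbone`
# field; kept as a comment since the class name here is mangled.
#
# config = TimmBackboneConfig(
#     backbone="resnet50", num_channels=3, features_only=True, out_indices=(1, 2, 3, 4)
# )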
| 194 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( a_ , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = DanceDiffusionPipeline
__UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCAmelCase = False
__UpperCAmelCase = False
def a__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCamelCase_ , use_timestep_embedding=UpperCamelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__magic_name__ = IPNDMScheduler()
__magic_name__ = {
'unet': unet,
'scheduler': scheduler,
}
return components
def a__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('mps' ):
__magic_name__ = torch.manual_seed(UpperCamelCase_ )
else:
__magic_name__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__magic_name__ = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def a__ ( self : List[Any] ):
'''simple docstring'''
__magic_name__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = DanceDiffusionPipeline(**UpperCamelCase_ )
__magic_name__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = self.get_dummy_inputs(UpperCamelCase_ )
__magic_name__ = pipe(**UpperCamelCase_ )
__magic_name__ = output.audios
__magic_name__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__magic_name__ = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a__ ( self : int ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def a__ ( self : str ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a__ ( self : Optional[Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def a__ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
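# --- Hedged usage sketch (not part of the original tests): full-size inference
# mirroring the slow integration tests below. Assumptions: network access and a
# CUDA device; the checkpoint id matches the one used in those tests.
#
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
# audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]  # (channels, samples)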
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = torch_device
__magic_name__ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
__magic_name__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(generator=UpperCamelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
__magic_name__ = output.audios
__magic_name__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__magic_name__ = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = torch_device
__magic_name__ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
__magic_name__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(generator=UpperCamelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
__magic_name__ = output.audios
__magic_name__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__magic_name__ = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
            assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
 | 545 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ ( a_):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int]=1_3 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=False , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[Any]=9_9 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Any=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Optional[Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Optional[int]="last" , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , ):
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_lengths
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = gelu_activation
__magic_name__ = sinusoidal_embeddings
__magic_name__ = causal
__magic_name__ = asm
__magic_name__ = n_langs
__magic_name__ = vocab_size
__magic_name__ = n_special
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = summary_type
__magic_name__ = use_proj
__magic_name__ = scope
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_input_lengths:
__magic_name__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , 2 ).float()
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self : int ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , ):
'''simple docstring'''
__magic_name__ = FlaubertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , langs=UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
'''simple docstring'''
__magic_name__ = FlaubertWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , ):
'''simple docstring'''
__magic_name__ = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
__magic_name__ = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
__magic_name__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
((__magic_name__) , ) = result_with_labels.to_tuple()
__magic_name__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
((__magic_name__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a__ ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , ):
'''simple docstring'''
__magic_name__ = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , ):
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , ):
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( a_ , a_ , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def a__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=False ):
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = FlaubertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=3_7 )
def a__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ ( self : Optional[Any] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )
def a__ ( self : int ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ )
def a__ ( self : int ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )
def a__ ( self : Optional[int] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )
def a__ ( self : str ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ )
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ )
@slow
def a__ ( self : Any ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = FlaubertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def a__ ( self : str ):
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__magic_name__ = True
__magic_name__ = model_class(config=UpperCamelCase_ )
__magic_name__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = torch.jit.trace(
UpperCamelCase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , 'traced_model.pt' ) )
__magic_name__ = torch.jit.load(os.path.join(UpperCamelCase_ , 'traced_model.pt' ) , map_location=UpperCamelCase_ )
loaded(inputs_dict['input_ids'].to(UpperCamelCase_ ) , inputs_dict['attention_mask'].to(UpperCamelCase_ ) )
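# --- Hedged usage sketch (not part of the original tests): plain FlauBERT feature
# extraction, mirroring the integration test below. Assumption: network access;
# kept as comments to avoid a download at import time.
#
# from transformers import FlaubertTokenizer
# tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
# model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
# with torch.no_grad():
#     hidden = model(**tokenizer("Le chat mange une pomme.", return_tensors="pt"))[0]
# # hidden has shape (batch, sequence_length, 768) for the base model.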
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
__magic_name__ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
__magic_name__ = model(UpperCamelCase_ )[0]
__magic_name__ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , UpperCamelCase_ )
__magic_name__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1e-4 ) )
 | 545 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case__ ( __A):
'''simple docstring'''
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Dict = None
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Dict = None
class snake_case__ ( __A):
'''simple docstring'''
def __init__( self , a__=1 , a__=0 , a__=2 , a__=5_12 , a__="cls" , a__=False , a__=True , **a__ , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
__snake_case :List[str] = project_dim
__snake_case :int = pooler_fn
__snake_case :int = learn_encoder
__snake_case :List[Any] = use_attention_mask
class snake_case__ ( __A):
'''simple docstring'''
lowerCamelCase : Tuple = [r"pooler", r"logit_scale"]
lowerCamelCase : int = [r"position_ids", r"predictions.decoder.bias"]
lowerCamelCase : Union[str, Any] = "roberta"
lowerCamelCase : int = RobertaSeriesConfig
def __init__( self , a__ ) -> Any:
'''simple docstring'''
super().__init__(a__ )
__snake_case :Tuple = XLMRobertaModel(a__ )
__snake_case :Optional[Any] = nn.Linear(config.hidden_size , config.project_dim )
__snake_case :Tuple = getattr(a__ , """has_pre_transformation""" , a__ )
if self.has_pre_transformation:
__snake_case :Optional[int] = nn.Linear(config.hidden_size , config.project_dim )
__snake_case :List[Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __lowercase ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ) -> Optional[int]:
'''simple docstring'''
__snake_case :Dict = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case :Optional[Any] = self.base_model(
input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_attentions=a__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=a__ , )
if self.has_pre_transformation:
__snake_case :Any = outputs['''hidden_states'''][-2]
__snake_case :Dict = self.pre_LN(a__ )
__snake_case :str = self.transformation_pre(a__ )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__snake_case :Dict = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
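# --- Hedged sketch (not part of the original file): the projection step above in
# isolation. A linear head maps encoder hidden states (hidden_size) to project_dim,
# and that projected tensor is what downstream components consume as
# `projection_state`. Sizes below are illustrative only.
_head = nn.Linear(8, 4)          # hidden_size=8, project_dim=4
_states = torch.randn(1, 3, 8)   # (batch, seq, hidden)
assert _head(_states).shape == (1, 3, 4)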
| 708 |
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
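# --- Illustrative usage sketch (added; not part of the original test utilities). The
# training loop, optimizer, and hyperparameters below are arbitrary choices showing what
# the synthetic regression pair is built for: the model should recover a=2, b=3.
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(20):  # a few epochs are enough on this toy problem
        for batch in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
    print(f"learned a={model.a.item():.2f}, b={model.b.item():.2f}")  # should approach 2 and 3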
| 291 | 0 |
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
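# --- Example invocation (added for illustration; the script filename and all paths
# below are placeholders, not files shipped with this code):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json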
| 26 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(self, directory: Path, identifier: Union[str, None] = None, n_identifier: Union[List[str], None] = None, ignore_files: Union[str, List[str], None] = None, only_modules: bool = True):
        """Run the doctests of every file in `directory` that matches the identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 26 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. (2022): churn-based noise injection plus a
    Heun-style predictor/corrector step.
    """

    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
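# --- Illustrative sampling loop (added; not part of the original scheduler module).
# This sketch uses the public `diffusers` import path and a dummy denoiser standing in
# for a UNet; real pipelines also precondition the model input, which is omitted here.
if __name__ == "__main__":
    import torch
    from diffusers import KarrasVeScheduler

    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for i in range(len(scheduler.schedule)):
        sigma = scheduler.schedule[i]
        sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else torch.tensor(0.0)
        # 1) churn: add noise to the current sample and bump sigma up to sigma_hat
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        # 2) predictor step with a dummy denoiser prediction (all zeros, shapes only)
        model_output = torch.zeros_like(sample_hat)
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 32, 32])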
| 707 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    # Element-wise max(0, x): negative entries are clamped to zero.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
| 185 | 0 |
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23]
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31]
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo that hosts the original checkpoints",
    )

    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
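# --- Example invocation (added for illustration; the script filename and dump folder
# are placeholders, and a CUDA device is required by the verification code above):
#
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge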
| 54 |
"""simple docstring"""
import numpy as np
def UpperCamelCase__ ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Dict , lowercase__ : List[str] ):
snake_case : Optional[int] = int(np.ceil((x_end - xa) / h ) )
snake_case : int = np.zeros((n + 1,) )
snake_case : Optional[Any] = ya
snake_case : List[Any] = xa
for k in range(lowercase__ ):
snake_case : Tuple = f(lowercase__ , y[k] )
snake_case : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case : Dict = f(x + h , y[k] + h * ka )
snake_case : str = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
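# --- Usage sketch (added; not part of the original file). Integrates dy/dx = y with
# y(0) = 1; the exact solution is e^x, so the final value should be close to e ≈ 2.71828.
if __name__ == "__main__":
    result = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
    print(result[-1])  # ~2.7182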
| 134 | 0 |
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 351 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 351 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
def is_in_circle(UpperCAmelCase_ : float, UpperCAmelCase_ : float ) -> bool:
A__ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
A__ = mean(
int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) )
for _ in range(UpperCAmelCase_ ) )
# The ratio of the area for circle to square is pi/4.
A__ = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : Callable[[float], float], UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0, ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(UpperCAmelCase_, UpperCAmelCase_ ) ) for _ in range(UpperCAmelCase_ ) ) * (max_value - min_value)
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(UpperCAmelCase_ : float ) -> float:
return x
A__ = area_under_curve_estimator(
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
A__ = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("******************" )
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(UpperCAmelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
A__ = area_under_curve_estimator(
UpperCAmelCase_, UpperCAmelCase_, 0.0, 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
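# --- Demo calls (added; the iteration counts are arbitrary — larger values tighten the
# Monte Carlo estimates at the cost of runtime).
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)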
| 104 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Unigram tokenizer with SentencePiece-style pre-processing: NMT/NFKC normalization,
    whitespace collapsing, lowercasing, and Metaspace/digit/punctuation pre-tokenization.
    """

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: str = "<unk>", eos_token: str = "</s>", pad_token: str = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model needs to know which vocabulary index is the unknown token.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
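# --- Usage sketch (added; not part of the original file). The toy corpus and tiny
# vocab size below are for illustration only.
if __name__ == "__main__":
    tok = SentencePieceUnigramTokenizer()
    tok.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=30, show_progress=False)
    encoding = tok.encode("hello world")
    print(encoding.tokens)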
| 130 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 705 |
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F

from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 673 | 0 |