"""DistilBERT model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
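# Usage sketch: a quick check of the attribute_map indirection above, assuming
# the class is exposed as `transformers.DistilBertConfig` (values are
# illustrative only):
#
#     from transformers import DistilBertConfig
#
#     config = DistilBertConfig(n_layers=3, n_heads=4, dim=128, hidden_dim=512)
#     assert config.num_hidden_layers == config.n_layers == 3
#     assert config.hidden_size == config.dim == 128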
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only yields a redirect, so fetch the real location first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
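# Usage sketch (hypothetical file name and token variable; the flags match the
# argparse definitions above):
#
#     python get_ci_error_statistics.py \
#         --workflow_run_id 1234567890 \
#         --output_dir ./ci_reports \
#         --token "$GITHUB_TOKEN"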
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
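# Usage sketch (assumes the usual transformers test-suite layout; the file
# path is the conventional one for these tests):
#
#     python -m pytest tests/models/opt/test_modeling_tf_opt.py -k "test_config or test_decoder_model_past_large_inputs"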
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
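# Usage sketch for the `rope_scaling` contract validated above (values are
# illustrative; assumes the class is exposed as `transformers.GPTNeoXConfig`):
#
#     from transformers import GPTNeoXConfig
#
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})   # raises ValueError: factor must be > 1
#     GPTNeoXConfig(rope_scaling={"name": "linear", "factor": 2.0})   # raises ValueError: missing/unknown type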
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between question answering / sequence classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
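# Usage sketch for `generate_dummy_inputs` (hedged: assumes the public
# `LayoutLMv3Processor` export and the constructor signature inherited from
# `OnnxConfig`; not taken from this file):
#
#     from transformers import LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#     dummy = onnx_config.generate_dummy_inputs(processor, framework=None)
#     print(sorted(dummy.keys()))  # attention_mask, bbox, input_ids, pixel_values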
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
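# Sketch of what the `_LazyModule` indirection buys (assumes an installed
# `transformers`): importing the package is cheap, and the heavy submodule is
# only loaded when one of its attributes is first touched.
#
#     import transformers.models.git as git  # fast: nothing heavy imported yet
#     config_cls = git.GitConfig             # configuration_git is imported here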
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
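# Usage sketch (hypothetical script name; requires torch.hub access to
# DeppMeng/ConditionalDETR and, for push_to_hub, Hub credentials):
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50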
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def A ( lowercase__ : Accelerator , lowercase__ : int = 16 ) -> Dict:
UpperCamelCase__ :Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 45 |
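# A minimal sketch of the decorator in isolation (illustrative; `train_one_setting`
# and the fake OOM threshold are made up). `find_executable_batch_size` retries the
# wrapped function with a halved batch size whenever it sees an out-of-memory error,
# which is why `inner_training_loop` above is called with no arguments.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train_one_setting(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 32:  # pretend that anything above 32 runs out of memory here
        raise RuntimeError("CUDA out of memory.")
    return batch_size


assert train_one_setting() == 32  # 128 -> 64 -> 32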
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 16 | 0 |
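# A cross-check for the backtracking version above, sketched with the standard
# library (`all_permutations` is a helper name I introduce): itertools.permutations
# enumerates the same n! orderings.
from itertools import permutations


def all_permutations(sequence):
    return [list(p) for p in permutations(sequence)]


assert len(all_permutations([3, 1, 2, 4])) == 24  # 4! orderings
assert ["A", "B", "C"] in all_permutations(["A", "B", "C"])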
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = BarkProcessor(tokenizer=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
_lowerCamelCase : Any = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
_lowerCamelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="(BOS)" ,eos_token="(EOS)" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
_lowerCamelCase : Union[str, Any] = 35
_lowerCamelCase : Optional[Any] = 2
_lowerCamelCase : int = 8
_lowerCamelCase : Union[str, Any] = {
"semantic_prompt": np.ones(__lowerCAmelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCamelCase : Union[str, Any] = processor(text=self.input_string ,voice_preset=__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(__lowerCAmelCase ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname ,"file.npz" )
np.savez(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : str = processor(text=self.input_string ,voice_preset=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(__lowerCAmelCase ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCamelCase : Optional[Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = BarkProcessor(tokenizer=__lowerCAmelCase )
_lowerCamelCase : Tuple = processor(text=self.input_string )
_lowerCamelCase : Union[str, Any] = tokenizer(
self.input_string ,padding="max_length" ,max_length=256 ,add_special_tokens=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() ) | 46 |
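# The .npz round-trip that the voice-preset test above depends on, in isolation.
# A self-contained sketch with toy arrays shaped like the preset (the keys are the
# same ones the test builds; everything else here is mine).
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **preset)
loaded = np.load(path)
for key in preset:
    assert np.array_equal(preset[key], loaded[key])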
def solution(n: int = 1000) -> int:
    a = 3
    result = 0
    while a < n:
        # multiples of 15 already satisfy `a % 3 == 0`, so no separate branch is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }') | 16 | 0 |
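# A closed-form cross-check for the loop above (helper names are mine): the sum of
# multiples of k below n is k * m * (m + 1) / 2 with m = (n - 1) // k, and
# inclusion-exclusion removes the double-counted multiples of 15.
def sum_of_multiples(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2


def solution_closed_form(n: int = 1000) -> int:
    return sum_of_multiples(3, n) + sum_of_multiples(5, n) - sum_of_multiples(15, n)


assert solution_closed_form(10) == 23  # 3 + 5 + 6 + 9
assert solution_closed_form() == solution()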
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ''
    m_count = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 47 |
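# A quick property check for the two helpers above, added as a sketch: generating a
# numeral for any value in the classic Roman range and parsing it back should be the
# identity.
if __name__ == "__main__":
    for value in range(1, 4000):
        assert parse_roman_numerals(generate_roman_numerals(value)) == value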
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
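# The idea behind `_LazyModule`, sketched with only the standard library (the class
# and names below are mine, not the transformers implementation): attribute access
# triggers the real import, so nothing heavy is loaded at module import time.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = set(submodules)

    def __getattr__(self, item):
        if item in self._submodules:
            module = importlib.import_module(f"{self.__name__}.{item}")
            setattr(self, item, module)  # cache so __getattr__ only runs once per name
            return module
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")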
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :List[Any] = KandinskyInpaintPipeline
snake_case__ :str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ :List[str] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ :List[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ :Tuple = False
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return 32
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return 100
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCAmelCase__ = MultilingualCLIP(__magic_name__ )
lowerCAmelCase__ = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ = UNetaDConditionModel(**__magic_name__ )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = self.dummy_tokenizer
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , steps_offset=1 , prediction_type="epsilon" , thresholding=__magic_name__ , )
lowerCAmelCase__ = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=0 ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCAmelCase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(__magic_name__ ) ).convert("RGB" ).resize((256, 256) )
# create mask
lowerCAmelCase__ = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase__ = 0
if str(__magic_name__ ).startswith("mps" ):
lowerCAmelCase__ = torch.manual_seed(__magic_name__ )
else:
lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCAmelCase__ = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**__magic_name__ )
lowerCAmelCase__ = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(__magic_name__ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase__ = np.ones((768, 768) , dtype=np.floataa )
lowerCAmelCase__ = 0
lowerCAmelCase__ = "a hat"
lowerCAmelCase__ = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
lowerCAmelCase__ = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
lowerCAmelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase__ ,lowerCAmelCase__ = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase__ = pipeline(
__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 48 |
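# `assert_mean_pixel_difference` is imported from the shared test utilities above;
# its exact implementation isn't shown here, so this is only a plausible sketch of
# what such a check does (the threshold of 10 intensity levels is an assumption):
import numpy as np


def mean_pixel_difference_ok(image, expected_image, threshold=10):
    # both inputs are uint8 image arrays of identical shape
    diff = np.abs(image.astype(np.float32) - expected_image.astype(np.float32)).mean()
    return diff < threshold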
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving a converted model must fail until it is reverted to the canonical format
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname) | 16 | 0 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 49 |
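# An iterative alternative sketched for contrast (names are mine): binary
# exponentiation does O(log n) multiplications instead of the O(n) recursion above,
# and it handles negative exponents directly.
def fast_power(base: float, exponent: int) -> float:
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    result = 1.0
    while exponent:
        if exponent & 1:  # lowest bit set: fold the current square into the result
            result *= base
        base *= base
        exponent >>= 1
    return result


assert fast_power(2, 10) == 1024
assert fast_power(2, -2) == 0.25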
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 | 0 |
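# The `_config_zero_init` helper inlined in the test above, restated as a readable
# sketch (non-recursive; the real test also descends into nested sub-configs): with
# every init scale forced to ~0, correctly initialized weights land on exactly 0.0
# or 1.0, so a custom initializer that ignores the config stands out.
import copy


def config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__:
        if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
            setattr(configs_no_init, key, 1e-10)
    return configs_no_init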
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="""BB84""")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, """0""")
    return key
if __name__ == "__main__":
print(F'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
| 50 |
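# The classical sifting step of BB84 on its own, as a plain-numpy sketch (the
# measured bits below are random stand-ins for the circuit's output, not real
# measurements): only positions where the two bases agree, about half of them on
# average, survive into the key.
import numpy as np

rng = np.random.default_rng(0)
alice_basis = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
measured_bits = rng.integers(2, size=16)
sifted_key = "".join(
    str(bit)
    for a_basis, b_basis, bit in zip(alice_basis, bob_basis, measured_bits)
    if a_basis == b_basis
)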
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 16 | 0 |
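# The `field(metadata={"help": ...})` pattern above in miniature (a sketch; the toy
# class is mine): metadata rides along on the dataclass field without affecting the
# class itself, and argument-parser tooling can read it back via `fields()`.
from dataclasses import dataclass, field, fields


@dataclass
class ToyArguments:
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})


for f in fields(ToyArguments):
    print(f.name, "->", f.metadata["help"])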
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =RoCBertTokenizer
_lowerCamelCase =None
_lowerCamelCase =False
_lowerCamelCase =True
_lowerCamelCase =filter_non_english
def __snake_case ( self : Any ):
super().setUp()
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
UpperCAmelCase = {}
UpperCAmelCase = {}
for i, value in enumerate(a__ ):
UpperCAmelCase = i
UpperCAmelCase = i
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(a__ , a__ , ensure_ascii=a__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(a__ , a__ , ensure_ascii=a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(a__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : int ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase = {}
for i, token in enumerate(a__ ):
UpperCAmelCase = i
UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=a__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __snake_case ( self : int ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self : Union[str, Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self : Any ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(a__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __snake_case ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(
a__ , return_attention_mask=a__ , return_token_type_ids=a__ , return_offsets_mapping=a__ , add_special_tokens=a__ , )
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(a__ , '''do_lower_case''' ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __snake_case ( self : List[str] ):
UpperCAmelCase = ['''的''', '''人''', '''有''']
UpperCAmelCase = ''''''.join(a__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = tokenizer_p.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(a__ )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = tokenizer_r.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_p.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(a__ )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(a__ )
]
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase = tokenizer.encode('''你好''' , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.encode('''你是谁''' , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __snake_case ( self : str ):
UpperCAmelCase = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase = '''你好,你是谁'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(a__ )
UpperCAmelCase = tokenizer.prepare_for_model(
a__ , a__ , a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.encode_plus(a__ , add_special_tokens=a__ )
self.assertEqual(a__ , a__ )
| 51 |
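# The algorithm the `RoCBertWordpieceTokenizer` cases above exercise, sketched per
# word (whitespace splitting happens in the basic tokenizer first; the helper name
# is mine): greedy longest-match-first WordPiece with "##" continuation pieces and
# an [UNK] fallback.
def wordpiece_tokenize(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix of the remainder is in the vocabulary
        tokens.append(cur)
        start = end
    return tokens


vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]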
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution()) | 16 | 0 |
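# The same first-ten-digits idea without the data file; the two addends below are
# sample values I supply for illustration, not the real num.txt contents.
sample_numbers = [
    "37107287533902102798797998220837590246510135740250",
    "46376937677490009712648124896970078050417018260538",
]
print(str(sum(int(line) for line in sample_numbers))[:10])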
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main() | 52 |
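# A worked example of the name translation above (the expected string is derived by
# hand). Note that the rule order matters: "LayerNorm/weight" must map to "gamma"
# before the generic "weight" -> "kernel" rule can fire.
assert to_tf_var_name("encoder.layer.0.attention.self.query.weight") == (
    "bert/encoder/layer_0/attention/self/query/kernel"
)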
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir) | 16 | 0 |
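# How the fixtures above compose in a test, as a sketch (the test function is
# mine): pytest injects the fixture chain by name, and `tmp_path` gives each test a
# fresh pathlib.Path, so the generated script directory never leaks between tests.
def test_script_dir_is_created(dataset_loading_script_dir):
    import os

    assert os.path.isdir(dataset_loading_script_dir)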
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # Plain question encoding: defer entirely to the underlying tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts was given: encode it as the text pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit passages from most to least relevant.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        # Score every candidate span up to `max_answer_length` tokens long.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip spans that overlap an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate errors about the removed --no_* flags into a helpful message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
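
# Invocation sketch (an assumption: the script filename is a placeholder; the
# flag names mirror the standard `BenchmarkArguments` fields):
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128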
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
    tests()
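
# Worked example (illustrative, not from the original file): for a Hermitian
# matrix, the Rayleigh quotient of an eigenvector is exactly its eigenvalue,
# and for any vector it lies between the smallest and largest eigenvalues.
#     >>> m = np.array([[2, 0], [0, 5]])
#     >>> rayleigh_quotient(m, np.array([[1], [0]]))  # eigenvector for eigenvalue 2
#     array([[2.]])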
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def renderer_dim(self):
return 8
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A node on the search grid, ordered by its heuristic cost."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search: always expands the open node closest to the goal."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the valid (in-bounds, unblocked) neighbours of a node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the goal node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem) | 16 | 0 |
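
# Usage sketch (illustrative): the search can be run on any start/goal pair
# inside the grid above; `search()` returns a list of (y, x) positions whose
# first element is the start.
#     >>> GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()[0]
#     (0, 0)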
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file from disk into a PyTorch model."""
    try:
        with open(model_file, 'rb') as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith('version'):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.'
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ')

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a deserialized Flax state dict into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.'
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ''

    flax_state_dict = flatten_dict(flax_state, sep='.')
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.')

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: HWIO (Flax) -> OIHW (PyTorch)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear kernel: transpose to match PyTorch's (out, in) layout
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('_0', '.0')
                    .replace('_1', '.1')
                    .replace('_2', '.2')
                    .replace('_3', '.3')
                    .replace('_4', '.4')
                    .replace('_5', '.5')
                    .replace('_6', '.6')
                    .replace('_7', '.7')
                    .replace('_8', '.8')
                    .replace('_9', '.9')
                )

        flax_key = '.'.join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            'Some weights of the Flax model were not used when initializing the PyTorch model'
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
            ' FlaxBertForSequenceClassification model).'
        )
    if len(missing_keys) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ' use it for predictions and inference.'
        )

    return pt_model
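
# A minimal usage sketch (an assumption, not part of the original module):
# deserialize a `.msgpack` Flax checkpoint into an already-instantiated
# PyTorch model. The model class and file path below are placeholders.
#
#     pt_model = SomePyTorchModel(config)  # hypothetical target model
#     pt_model = load_flax_checkpoint_in_pytorch_model(
#         pt_model, "path/to/diffusion_flax_model.msgpack"
#     )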
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
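
# Usage sketch (illustrative, not part of the original file): the auto classes
# resolve the matching Flax architecture from a checkpoint's config.
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
#     lm = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")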
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
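
# Usage sketch (illustrative): instantiating a randomly-initialized model from
# this configuration, assuming the accompanying `ViTMAEModel` class from the
# same library.
#
#     from transformers import ViTMAEModel
#     config = ViTMAEConfig(mask_ratio=0.9)  # mask 90% of patches in pre-training
#     model = ViTMAEModel(config)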
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
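
# Quick numeric example (illustrative): a 10 kg body moving at 10 m/s.
#     >>> kinetic_energy(10, 10)
#     500.0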
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # Multi-hot label vector over all classes.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 58 |
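
# A minimal wiring sketch (an assumption, not part of the original file):
# the dataset plugs into a standard DataLoader via `collate_fn` above; the
# tokenizer and the `train.jsonl` path are placeholders.
#
#     from torch.utils.data import DataLoader
#     labels = get_mmimdb_labels()
#     dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
#     loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)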
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Legacy __init__: translate deprecated --no_* flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
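
# Usage sketch (hedged: `models`, `batch_sizes` and `sequence_lengths` are
# fields inherited from `BenchmarkArguments`; `TensorFlowBenchmark` is the
# matching benchmark runner from the same package):
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     benchmark = TensorFlowBenchmark(args=args)
#     results = benchmark.run()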
def get_demo_graph(index):
    # Returns one of four sample adjacency lists, selected by `index`.
    return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
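
# Example (illustrative): in demo graph 0, the triangle 0-1-2 and the cycle
# 5-6-7-8 contain no bridges, so only (2, 3), (3, 4) and (2, 5) are reported
# (the raw output order depends on the DFS traversal).
#     >>> sorted(compute_bridges(get_demo_graph(0)))
#     [(2, 3), (2, 5), (3, 4)]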
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Heun's method: an explicit Euler predictor followed by a trapezoidal corrector.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
    doctest.testmod()
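
# Example (illustrative): integrating y' = y from x = 0 with y(0) = 1 should
# approximate e at x = 1, since Heun's method is second-order accurate.
#     >>> y = heun_method(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#     >>> round(y[-1], 4)  # exp(1) ~= 2.7183
#     2.7183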
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    # Sum of the factorials of the digits of `n`.
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    # Upper bound: beyond 7 * 9! a number always exceeds the sum of its digit factorials.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
    print(f"{solution() = }")
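
# For reference (illustrative): the only numbers equal to the sum of the
# factorials of their digits are 145 and 40585, so `solution()` returns 40730.
#     >>> sum_of_digit_factorial(145)  # 1! + 4! + 5! = 1 + 24 + 120
#     145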
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
    doctest.testmod()
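
# Example (illustrative): a number is "perfect" when it equals the sum of its
# proper divisors.
#     >>> sum_of_proper_divisors(6)   # 1 + 2 + 3
#     6
#     >>> sum_of_proper_divisors(28)  # 1 + 2 + 4 + 7 + 14
#     28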
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
while i <= 10_0000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Extracts the user profile dict embedded in an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Fetches and exposes public profile information for an Instagram user."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
    print(f"{instagram_user.is_private = }")
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a byte string (FIPS 180-4)."""

    def __init__(self, data: bytes):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a single 1-bit, zeros, and the 64-bit big-endian message length.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        # Right-rotate a 32-bit integer `value` by `rotations` bits.
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 62 |
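
# Quick check against a published test vector (illustrative):
#     >>> SHA256(b"abc").hash
#     'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'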
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Map every byte to a printable unicode character, so BPE can run on bytes.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
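# Illustrative example (added for clarity):
#   get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}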
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, based on byte-level Byte-Pair-Encoding."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
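    # Illustrative example (added; hypothetical merge table): if self.bpe_ranks
    # were {("l", "o"): 0, ("lo", "w"): 1}, then self.bpe("low") would merge
    # ("l", "o", "w") -> ("lo", "w") -> ("low",) and return "low".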
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids | 16 | 0 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
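# Illustrative example (added; easy to verify by hand):
#   longest_subsequence([1, 2, 3]) -> [1, 2, 3]
#   (an already sorted array is its own longest non-decreasing subsequence)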
| 63 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact (the redirect URL is only valid briefly)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error occurrence and collect the failing tests per error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
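# Illustrative example (added for clarity; hypothetical test path):
#   get_model("tests/models/bert/test_modeling_bert.py::BertModelTest::test_config")
#   -> "bert"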
def reduce_by_model(logs, error_filter=None):
    """Group errors per model and count occurrences."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2) | 16 | 0 |
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encrypt a message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """Decrypt Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
    main()
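# Illustrative round trip (added; uses the table above):
#   encrypt("SOS") -> "... --- ..."
#   decrypt("... --- ...") -> "SOS"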
| 64 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX model."""
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") | 16 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
| 65 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
UpperCamelCase = frozenset(["prompt", "negative_prompt"])
UpperCamelCase = frozenset([])
UpperCamelCase = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
UpperCamelCase = frozenset(["image"])
UpperCamelCase = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
UpperCamelCase = frozenset(["prompt", "image", "negative_prompt"])
UpperCamelCase = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
UpperCamelCase = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
UpperCamelCase = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
UpperCamelCase = frozenset(["image", "mask_image"])
UpperCamelCase = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
UpperCamelCase = frozenset(["example_image", "image", "mask_image"])
UpperCamelCase = frozenset(["class_labels"])
UpperCamelCase = frozenset(["class_labels"])
UpperCamelCase = frozenset(["batch_size"])
UpperCamelCase = frozenset([])
UpperCamelCase = frozenset(["batch_size"])
UpperCamelCase = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
UpperCamelCase = frozenset(["prompt", "negative_prompt"])
UpperCamelCase = frozenset(["input_tokens"])
UpperCamelCase = frozenset(["input_tokens"])
| 66 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # move base-model weights under ".model" (this key surgery follows the DETR conversion pattern)
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
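# Illustrative example (added for clarity):
#   filter_common_word(["the cat", "dog"], "the") -> ["the cat"]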
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str = "p059_cipher.txt" ) -> int:
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding='utf-8' )
_lowercase = [int(snake_case__ ) for number in data.strip().split(',' )]
_lowercase = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_lowercase = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_lowercase = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""") | 67 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int]) -> None:
    """Creates a state space tree and iterates through each branch using DFS."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
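# Illustrative output (added): for [1, 2] the tree prints [1, 2] then [2, 1].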
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 16 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    a = 3
    result = 0
    while a < n:
        # multiples of 15 are already matched by `a % 3 == 0`, so a single test suffices
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
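# Worked example (added): solution(10) sums 3 + 5 + 6 + 9 = 23.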
if __name__ == "__main__":
print(f'{solution() = }') | 16 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
# fmt: off
__snake_case = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        # `__snake_case` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences)
| 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    '''
    Log commit info.
    '''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    '''
    Handle single and multi-GPU / multi-node setups.
    '''
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank)
    logger.info(PREFIX + 'World size : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl', )
def set_seed(args):
    '''
    Set the random seed.
    '''
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 70 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase ) | 16 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowerCamelCase = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
_lowerCamelCase = None
def a__ ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=_SCREAMING_SNAKE_CASE , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=_SCREAMING_SNAKE_CASE , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ : Dict = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
def remove_articles(_SCREAMING_SNAKE_CASE : Optional[int] ):
return ARTICLES_REGEX.sub(" " , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCAmelCase_ : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
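# Worked example (added for illustration): the normalization above lowercases,
# strips punctuation, drops the articles a/an/the, then collapses whitespace.
# Reimplemented locally because the helpers in this file carry generated names.
import re as _re
import string as _string

def _normalize(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(_string.punctuation))
    text = _re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

assert _normalize("The Cat's mat!") == "cats mat"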
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
if not s:
return []
return normalize_answer(_SCREAMING_SNAKE_CASE ).split()
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
"""simple docstring"""
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = get_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = get_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = sum(common.values() )
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ : Dict = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = (2 * precision * recall) / (precision + recall)
return fa
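# Worked example (illustrative reimplementation of the token-level F1 above;
# the answer-normalization step is omitted for brevity): gold "the cat sat"
# vs. prediction "cat sat down" share two tokens, so precision = recall = 2/3
# and F1 = 2/3.
import collections as _collections

def _token_f1(gold, pred):
    gold_toks, pred_toks = gold.split(), pred.split()
    common = _collections.Counter(gold_toks) & _collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        # If either is no-answer, F1 is 1 if they agree, 0 otherwise
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

assert abs(_token_f1("the cat sat", "cat sat down") - 2 / 3) < 1e-9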
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ : Union[str, Any] = qa["id"]
UpperCAmelCase_ : str = [t for t in qa["answers"]["text"] if normalize_answer(_SCREAMING_SNAKE_CASE )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ : int = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
UpperCAmelCase_ : Dict = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ : Any = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
UpperCAmelCase_ : str = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : int = {}
for qid, s in scores.items():
UpperCAmelCase_ : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ : Tuple = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ : Any = s
return new_scores
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple=None ) -> int:
"""simple docstring"""
if not qid_list:
UpperCAmelCase_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ : str = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for k in new_eval:
UpperCAmelCase_ : int = new_eval[k]
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color="b" , alpha=0.2 , where="post" )
plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_SCREAMING_SNAKE_CASE )
plt.savefig(_SCREAMING_SNAKE_CASE )
plt.clf()
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[int]:
"""simple docstring"""
    UpperCAmelCase_ : List[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda k : na_probs[k] )
UpperCAmelCase_ : List[Any] = 0.0
UpperCAmelCase_ : Dict = 1.0
UpperCAmelCase_ : Union[str, Any] = 0.0
UpperCAmelCase_ : Dict = [1.0]
UpperCAmelCase_ : List[str] = [0.0]
UpperCAmelCase_ : Any = 0.0
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ : Optional[Any] = true_pos / float(i + 1 )
UpperCAmelCase_ : Any = true_pos / float(_SCREAMING_SNAKE_CASE )
if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_SCREAMING_SNAKE_CASE )
recalls.append(_SCREAMING_SNAKE_CASE )
if out_image:
plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {"ap": 100.0 * avg_prec}
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ : Optional[int] = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ : Tuple = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ : Union[str, Any] = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ : Optional[int] = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "pr_exact" )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "pr_f1" )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "pr_oracle" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
if not qid_list:
return
UpperCAmelCase_ : List[Any] = [na_probs[k] for k in qid_list]
UpperCAmelCase_ : Dict = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) )
plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ : List[str] = num_no_ans
UpperCAmelCase_ : Optional[int] = cur_score
UpperCAmelCase_ : Any = 0.0
    UpperCAmelCase_ : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda k : na_probs[k] )
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ : Tuple = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ : int = -1
else:
UpperCAmelCase_ : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ : int = cur_score
UpperCAmelCase_ : Tuple = na_probs[qid]
return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
    UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = best_exact
UpperCAmelCase_ : int = exact_thresh
UpperCAmelCase_ : int = best_fa
UpperCAmelCase_ : Union[str, Any] = fa_thresh
def a__ ( ) -> str:
"""simple docstring"""
with open(OPTS.data_file ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ : int = json.load(_SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
UpperCAmelCase_ : Tuple = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False
UpperCAmelCase_ : Dict = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ : Dict = [k for k, v in qid_to_has_ans.items() if not v]
    UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
UpperCAmelCase_ : Dict = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
UpperCAmelCase_ : Optional[int] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_ans_qids:
UpperCAmelCase_ : Optional[Any] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "HasAns" )
if no_ans_qids:
UpperCAmelCase_ : Any = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) )
if __name__ == "__main__":
_lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
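# Example invocation (file names are placeholders; inputs must follow the
# official SQuAD v2.0 JSON schema expected by the parser above):
#
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json --out-image-dir plots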
| 71 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
_UpperCAmelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
_UpperCAmelCase : Union[str, Any] = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def UpperCamelCase ( ) -> int:
'''simple docstring'''
lowercase =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase =bs[:]
lowercase =0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase_ )
cs.append(2**8 + n )
n += 1
lowercase =[chr(lowercase_ ) for n in cs]
return dict(zip(lowercase_ , lowercase_ ) )
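# Quick standalone check (illustrative) of the byte-to-unicode table built by
# the function above: bytes outside the printable ranges are shifted to 256+n,
# so the space byte (0x20) maps to 'Ġ', the familiar GPT-2/BART boundary mark.
_bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
_cs, _n = list(_bs), 0
for _b in range(2**8):
    if _b not in _bs:
        _bs.append(_b)
        _cs.append(2**8 + _n)
        _n += 1
assert dict(zip(_bs, [chr(_c) for _c in _cs]))[ord(" ")] == "Ġ"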
def UpperCamelCase ( lowercase_ : int ) -> Optional[int]:
'''simple docstring'''
lowercase =set()
lowercase =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase =char
return pairs
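# Illustration: for the symbol tuple ('l', 'o', 'w') the pair extractor above
# yields the adjacent bigrams that BPE ranks against the merge table.
_word = ("l", "o", "w")
assert {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)} == {("l", "o"), ("o", "w")}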
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ):
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
lowercase =json.load(snake_case_ )
lowercase ={v: k for k, v in self.encoder.items()}
lowercase =errors # how to handle errors in decoding
lowercase =bytes_to_unicode()
lowercase ={v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='''utf-8''' ) as merges_handle:
lowercase =merges_handle.read().split('''\n''' )[1:-1]
lowercase =[tuple(merge.split() ) for merge in bpe_merges]
lowercase =dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
lowercase ={}
lowercase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _A( self ):
return len(self.encoder )
def _A( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A( self , snake_case_ ):
if token in self.cache:
return self.cache[token]
lowercase =tuple(snake_case_ )
lowercase =get_pairs(snake_case_ )
if not pairs:
return token
while True:
lowercase =min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase =bigram
lowercase =[]
lowercase =0
while i < len(snake_case_ ):
try:
lowercase =word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase =j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase =tuple(snake_case_ )
lowercase =new_word
if len(snake_case_ ) == 1:
break
else:
lowercase =get_pairs(snake_case_ )
lowercase =''' '''.join(snake_case_ )
lowercase =word
return word
def _A( self , snake_case_ ):
lowercase =[]
for token in re.findall(self.pat , snake_case_ ):
lowercase =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(''' ''' ) )
return bpe_tokens
def _A( self , snake_case_ ):
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def _A( self , snake_case_ ):
return self.decoder.get(snake_case_ )
def _A( self , snake_case_ ):
lowercase =''''''.join(snake_case_ )
lowercase =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _A( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '''\n''' )
lowercase =0
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase =token_index
writer.write(''' '''.join(snake_case_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _A( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase =[self.cls_token_id]
lowercase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =[self.sep_token_id]
lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A( self , snake_case_ , snake_case_=False , **snake_case_ ):
lowercase =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
lowercase =''' ''' + text
return (text, kwargs)
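# Usage sketch (illustrative; requires network access to the facebook/bart-base
# files listed in the URL maps above):
#
#     from transformers import BartTokenizer
#     tok = BartTokenizer.from_pretrained("facebook/bart-base")
#     enc = tok("Hello world")
#     enc["input_ids"]                             # e.g. [0, 31414, 232, 2]
#     tok.convert_ids_to_tokens(enc["input_ids"])  # ['<s>', 'Hello', 'Ġworld', '</s>']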
| 72 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = v.to_dict()
return d | 16 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
try:
SCREAMING_SNAKE_CASE = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE = strtobool(_UpperCAmelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''')
return _value
a_ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a_ : int = parse_flag_from_env('RUN_REMOTE', default=False)
a_ : List[str] = parse_flag_from_env('RUN_LOCAL', default=True)
a_ : Dict = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a_ : Tuple = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a_ : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a_ : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a_ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a_ : int = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a_ : Dict = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a_ : Optional[int] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def lowerCamelCase__ (_UpperCAmelCase):
try:
import faiss # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires faiss')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import regex # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires regex')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import elasticsearch # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires elasticsearch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import sqlalchemy # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires sqlalchemy')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TORCH_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires PyTorch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TF_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires TensorFlow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.JAX_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires JAX')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.PIL_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires Pillow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
def _require_spacy_model(_UpperCAmelCase):
try:
import spacy # noqa F401
spacy.load(_UpperCAmelCase)
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(_UpperCAmelCase))(_UpperCAmelCase)
else:
return test_case
return _require_spacy_model
def lowerCamelCase__ (_UpperCAmelCase):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_slow_tests or _run_slow_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is slow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_local_tests or _run_local_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is local')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_packaged_tests or _run_packaged_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is packaged')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_remote_tests or _run_remote_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test requires remote')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls , name , fn)
return cls
return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
    pass
class OfflineSimulationMode ( Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCamelCase__ (_UpperCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCAmelCase=1e-16):
SCREAMING_SNAKE_CASE = requests.Session().request
def timeout_request(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
# Change the url to an invalid url so that the connection hangs
SCREAMING_SNAKE_CASE = 'https://10.255.255.1'
if kwargs.get('timeout') is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
SCREAMING_SNAKE_CASE = timeout
try:
return online_request(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
SCREAMING_SNAKE_CASE = url
SCREAMING_SNAKE_CASE = e.args[0]
SCREAMING_SNAKE_CASE = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]'''),)
SCREAMING_SNAKE_CASE = (max_retry_error,)
raise
def raise_connection_error(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
raise requests.ConnectionError('Offline mode is enabled.' , request=_UpperCAmelCase)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def lowerCamelCase__ (*_UpperCAmelCase , **_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(Path().resolve())
with tempfile.TemporaryDirectory(*_UpperCAmelCase , **_UpperCAmelCase) as tmp_dir:
try:
os.chdir(_UpperCAmelCase)
yield
finally:
os.chdir(_UpperCAmelCase)
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist() == deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist()
def lowerCamelCase__ (_UpperCAmelCase):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase):
try:
return func(*_UpperCAmelCase , **_UpperCAmelCase)
except HTTPError as err:
if str(_UpperCAmelCase).startswith('500') or str(_UpperCAmelCase).startswith('502'):
pytest.xfail(str(_UpperCAmelCase))
raise err
return decorator.decorator(_wrapper , _UpperCAmelCase)
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    if echo:
        print('\nRunning: ' , ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:')),
            _read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:')),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def lowerCamelCase__ (cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''')
    return result
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0')
SCREAMING_SNAKE_CASE = re.sub(R'^gw' , '' , _UpperCAmelCase , 0 , re.M)
return int(_UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = 2_9500
SCREAMING_SNAKE_CASE = pytest_xdist_worker_id()
return port + uniq_delta
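# Illustration of the port derivation above: pytest-xdist names its workers
# "gw0", "gw1", ..., and each worker gets base port 29500 plus its numeric index.
import re as _re2

assert 2_9500 + int(_re2.sub(R'^gw', '', 'gw3')) == 29503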
| 73 |
import os
def __a ( ):
SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(A__ ) , "num.txt" )
with open(A__ ) as file_hand:
return str(sum(int(A__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution()) | 16 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def a__ ( snake_case ):
"""simple docstring"""
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(snake_case )
EnvironmentCommand.register_subcommand(snake_case )
TestCommand.register_subcommand(snake_case )
RunBeamCommand.register_subcommand(snake_case )
DummyDataCommand.register_subcommand(snake_case )
# Parse args
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = parser.parse_known_args()
if not hasattr(snake_case , '''func''' ):
parser.print_help()
exit(1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_unknown_args(snake_case )
# Run
__SCREAMING_SNAKE_CASE : List[str] = args.func(snake_case , **snake_case )
service.run()
if __name__ == "__main__":
main()
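# Illustration of the unknown-args helper above: flags pair with the value
# that follows them and leading dashes are stripped (flag names here are
# made up for the example).
_unknown = ['--num_proc', '4', '--cache_dir', '/tmp/hf']
assert {k.lstrip('-'): v for k, v in zip(_unknown[::2], _unknown[1::2])} == {
    'num_proc': '4', 'cache_dir': '/tmp/hf'}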
| 74 |
import pytest
__A : Optional[Any] = '__dummy_dataset1__'
__A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def __a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = dataset_loading_script_name
SCREAMING_SNAKE_CASE = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=A__ )
SCREAMING_SNAKE_CASE = script_dir / F"{script_name}.py"
with open(A__ , "w" ) as f:
f.write(A__ )
return str(A__ ) | 16 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
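# Illustration of the lazy wiring above: nothing heavy is imported at module
# load time; the first attribute access triggers the real import, e.g.
#
#     from transformers.models.gpt_neo import GPTNeoConfig  # resolved lazily
#     GPTNeoConfig().model_type                             # 'gpt_neo'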
| 75 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__A : Tuple = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Optional[Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : str = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
__A : List[str] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
__A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
__A : str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__A : Any = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__A : Dict = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__A : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # With no titles/texts, behave exactly like the parent tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Finds the best answer spans for one passage, keeping only non-overlapping candidates."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
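# Added usage sketch (names are from this module): `reader_input` is the BatchEncoding
# returned by DPRReaderTokenizerFast.__call__ and `reader_output` comes from a DPR
# reader model; decode_best_spans(reader_input, reader_output, num_spans=5) then
# yields the top DPRSpanPrediction tuples ranked by span_score.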
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"] | 16 | 0 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
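    # Added check (not in the original): for n = 10, 55**2 - 385 == 2640.
    assert solution(10) == 2640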
| 76 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
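# Added illustration (not in the original): for the identity matrix the quotient is
# exactly 1 for any non-zero vector, since v* I v == v* v.
assert rayleigh_quotient(np.eye(2), np.array([[3.0], [4.0]])) == 1.0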
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 16 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A = """facebook/wmt19-en-de"""
A = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
A = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
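# Added usage sketch (assumes the directory saved above; outputs are meaningless at
# this model size):
# reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# reloaded.generate(**FSMTTokenizer.from_pretrained(mname_tiny)(["Hi"], return_tensors="pt"))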
| 77 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
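# Added note (illustration only): on the module-level grid below, search() returns a
# list of (y, x) positions from init to goal; when no path exists it falls back to
# returning [start], mirroring the original control flow.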
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem) | 16 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Computes a**b by divide and conquer; b may be negative thanks to int() truncation."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Extends actual_power to negative exponents by inverting the result."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
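    # Added checks (not in the original): negative exponents invert, e.g. (-2)**-3 == -0.125.
    assert power(-2, -3) == -0.125
    assert power(2, 10) == 1024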
| 78 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
) | 16 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 79 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy in joules: 0.5 * m * |v|^2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
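# Added example (not in the original): a 10 kg mass at +/-5 m/s carries 0.5 * 10 * 25 = 125 J.
assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0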
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 16 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solves the ideal gas law PV = nRT for the pressure P."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solves the ideal gas law PV = nRT for the volume V."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
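# Added round-trip check (not in the original): solving for P and substituting back
# must recover the original volume.
assert abs(volume_of_gas_system(2, 100, pressure_of_gas_system(2, 100, 0.01)) - 0.01) < 1e-12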
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self, **kwargs):
        """Renames deprecated `no_*` arguments to their positive counterparts before delegating to the parent dataclass."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0 | 16 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            # rename fairseq "decoder*" weights to HF "biogpt*" (reconstructed mapping)
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
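# Added usage sketch (script name and paths are placeholders):
#   python convert_biogpt_original_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/hf_output_dir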
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 81 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's (modified Euler) method: predict with Euler, then average the two slopes."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
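# Added check (not in the original): Heun's method on y' = y with y(0) = 1 and step 0.1
# lands within ~5e-3 of e at x = 1.
assert abs(euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] - 2.718281828) < 1e-2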
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 |
def sum_of_proper_divisors(input_num: int) -> int:
    """Returns the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
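# Added example (function name is this rewrite's own): 28 is a perfect number, so its
# proper divisors 1, 2, 4, 7, 14 sum back to 28.
assert sum_of_proper_divisors(28) == 28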
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
_lowerCamelCase : Any = {
'''input_ids''': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_lowerCamelCase : Any = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowerCAmelCase )
for expected, decoded in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 83 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__A : List[Any] = {'UserAgent': UserAgent().random}
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = script.contents[0]
SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = f"https://www.instagram.com/{username}/"
SCREAMING_SNAKE_CASE = self.get_json()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__lowerCamelCase ).text
SCREAMING_SNAKE_CASE = BeautifulSoup(__lowerCamelCase , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["username"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["full_name"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["biography"]
@property
def _snake_case ( self : Tuple ):
return self.user_data["business_email"]
@property
def _snake_case ( self : Optional[Any] ):
return self.user_data["external_url"]
@property
def _snake_case ( self : int ):
return self.user_data["edge_followed_by"]["count"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["edge_follow"]["count"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _snake_case ( self : Any ):
return self.user_data["profile_pic_url_hd"]
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["is_verified"]
@property
def _snake_case ( self : Dict ):
return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }') | 16 | 0 |
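A minimal, self-contained sketch of the inline-script JSON extraction the row above relies on; the sample payload and key names are illustrative only, not Instagram's real markup.
import json
def extract_embedded_json(script_text: str) -> dict:
    # Mirrors the data[data.find(...) : -1] slice above: take everything from the
    # first '{' up to (not including) the trailing ';' of the inline script.
    start = script_text.find("{")
    return json.loads(script_text[start:-1])
sample = 'window._sharedData = {"user": {"username": "github", "followers": 120000}};'
profile = extract_embedded_json(sample)
assert profile["user"]["username"] == "github"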
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
return "".join(sorted(__SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
return word_by_signature[signature(__SCREAMING_SNAKE_CASE )]
UpperCAmelCase = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
UpperCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
UpperCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
UpperCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 84 |
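A quick standalone run of the signature-bucketing approach from the row above, using a small inline word list in place of the assumed words.txt file.
import collections
def signature(word: str) -> str:
    return "".join(sorted(word))
words = ["eat", "tea", "ate", "tan", "nat", "bat"]
word_by_signature = collections.defaultdict(list)
for word in words:
    word_by_signature[signature(word)].append(word)
# Keep only signatures shared by more than one word, i.e. real anagram groups.
anagram_groups = {sig: ws for sig, ws in word_by_signature.items() if len(ws) > 1}
print(anagram_groups)  # {'aet': ['eat', 'tea', 'ate'], 'ant': ['tan', 'nat']}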
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Any = logging.get_logger(__name__)
__A : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Optional[Any] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : str ):
return len(self.encoder )
def _snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Tuple , __lowerCamelCase : Dict ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : int , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 16 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
SCREAMING_SNAKE_CASE__ : List[str] = {
            'input_ids': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.int32 ), # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : Dict = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , a_ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 85 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( A__ : str , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : List[Any] , A__ : Optional[int]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = result.headers["Location"]
SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" )
with open(A__ , "wb" ) as fp:
fp.write(response.content )
def __a ( A__ : List[Any] , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE = line[: line.index(": " )]
SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE = line[len("FAILED " ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE = line
if len(A__ ) != len(A__ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` "
F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
SCREAMING_SNAKE_CASE = None
if job_name and job_links:
SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def __a ( A__ : List[str] , A__ : Tuple=None ):
SCREAMING_SNAKE_CASE = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def __a ( A__ : str ):
SCREAMING_SNAKE_CASE = test.split("::" )[0]
if test.startswith("tests/models/" ):
SCREAMING_SNAKE_CASE = test.split("/" )[2]
else:
SCREAMING_SNAKE_CASE = None
return test
def __a ( A__ : List[str] , A__ : Dict=None ):
SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE = {x[2] for x in logs}
SCREAMING_SNAKE_CASE = {}
for test in tests:
SCREAMING_SNAKE_CASE = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts}
    SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def __a ( A__ : Dict ):
SCREAMING_SNAKE_CASE = "| no. | error | status |"
SCREAMING_SNAKE_CASE = "|-:|:-|:-|"
SCREAMING_SNAKE_CASE = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"]
SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |"
lines.append(A__ )
return "\n".join(A__ )
def __a ( A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = "| model | no. of errors | major error | count |"
SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|"
SCREAMING_SNAKE_CASE = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0]
SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A : int = get_job_links(args.workflow_run_id, token=args.token)
__A : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A : Union[str, Any] = k.find(' / ')
__A : Optional[int] = k[index + len(' / ') :]
__A : Optional[int] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A : Optional[Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A : str = reduce_by_error(errors)
__A : int = reduce_by_model(errors)
__A : Any = make_github_table(reduced_by_error)
__A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 16 | 0 |
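A compact, self-contained illustration of the reduce-by-error aggregation this script performs, run on fake (error_line, error, failed_test) triples so no GitHub API access is needed.
from collections import Counter
logs = [
    ("models/bert/test.py:10", "AssertionError", "tests/models/bert/test_a.py::test_x"),
    ("models/bert/test.py:20", "AssertionError", "tests/models/bert/test_a.py::test_y"),
    ("models/gpt2/test.py:5", "OSError", "tests/models/gpt2/test_b.py::test_z"),
]
counter = Counter(error for _, error, _ in logs)
reduced = {
    error: {"count": count, "failed_tests": [t for _, e, t in logs if e == error]}
    for error, count in counter.most_common()
}
print(reduced["AssertionError"]["count"])  # 2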
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a :Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__a :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__a :List[str] = '\nCalculates how good predictions are, given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__a :List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__a :str = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Dict ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=[1, 10, 100] , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=UpperCAmelCase ) as executor:
A_ = []
A_ = Counter()
A_ = 0
A_ = defaultdict(UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
for candidate in candidates:
A_ = candidate + "\n" + test_case
A_ = (test_program, timeout, task_id, completion_id[task_id])
A_ = executor.submit(UpperCAmelCase , *UpperCAmelCase )
futures.append(UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCAmelCase ):
A_ = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
A_ , A_ = [], []
for result in results.values():
result.sort()
A_ = [r[1]["passed"] for r in result]
total.append(len(UpperCAmelCase ) )
correct.append(sum(UpperCAmelCase ) )
A_ = np.array(UpperCAmelCase )
A_ = np.array(UpperCAmelCase )
A_ = k
A_ = {f'''pass@{k}''': estimate_pass_at_k(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
def estimator(__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = itertools.repeat(__UpperCamelCase ,len(__UpperCamelCase ) )
else:
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = iter(__UpperCamelCase )
return np.array([estimator(int(__UpperCamelCase ) ,int(__UpperCamelCase ) ,__UpperCamelCase ) for n, c in zip(__UpperCamelCase ,__UpperCamelCase )] ) | 86 |
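The estimator above computes pass@k = 1 - C(n-c, k) / C(n, k) through a numerically stable product. A standalone check with assumed counts (10 samples, 3 correct):
import numpy as np
def pass_at_k(n: int, c: int, k: int) -> float:
    # Probability that at least one of k sampled candidates (out of n, with c correct) passes.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
# 10 samples, 3 correct: pass@1 must equal c/n = 0.3.
assert abs(pass_at_k(10, 3, 1) - 0.3) < 1e-9
print(pass_at_k(10, 3, 5))  # ~0.9167, i.e. 1 - C(7,5)/C(10,5) = 1 - 1/12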
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neox"
def __init__( self : Optional[int] , __lowerCamelCase : List[str]=50432 , __lowerCamelCase : int=6144 , __lowerCamelCase : Optional[Any]=44 , __lowerCamelCase : Tuple=64 , __lowerCamelCase : Optional[int]=24576 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Any=0.25 , __lowerCamelCase : List[Any]=10000 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[Any]=2048 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Tuple=1e-5 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : str , ):
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = rotary_pct
SCREAMING_SNAKE_CASE = rotary_emb_base
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = use_parallel_residual
SCREAMING_SNAKE_CASE = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def _snake_case ( self : Union[str, Any] ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def SCREAMING_SNAKE_CASE ( main_process_only = True , *args , **kwargs ) -> List[Any]:
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        # Only the local main process should render the bar; every other rank gets a disabled one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 87 |
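The same main-process-only guard, sketched without the accelerate dependency; reading the rank from LOCAL_RANK is an assumed launcher convention, not part of the original row.
import os
from tqdm.auto import tqdm as _tqdm
def tqdm(*args, main_process_only: bool = True, **kwargs):
    rank = int(os.environ.get("LOCAL_RANK", "0"))  # assumed to be set by the launcher
    disable = main_process_only and rank != 0
    return _tqdm(*args, disable=disable, **kwargs)
for _ in tqdm(range(3), desc="demo"):
    pass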
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
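A minimal sketch of the deferred-import idea behind _LazyModule: the real module is only imported when an attribute is first touched. The LazyModule class below is illustrative, not the transformers implementation.
import importlib
import types
class LazyModule(types.ModuleType):
    """Defers importing the wrapped module until an attribute is first accessed."""
    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target
        self._module = None
    def __getattr__(self, attr):
        # Only reached for attributes not already in __dict__, so the import
        # happens exactly once, on first real use.
        if self._module is None:
            self._module = importlib.import_module(self._target)
        return getattr(self._module, attr)
lazy_json = LazyModule("lazy_json", "json")
print(lazy_json.dumps({"ok": True}))  # the real `json` import happens here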
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def __a ( A__ : Dict , A__ : Dict , A__ : Any ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
return new_state_dict
def __a ( A__ : Optional[Any] , A__ : Tuple=False ):
SCREAMING_SNAKE_CASE = ""
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:256]
SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-256:]
def __a ( ):
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def __a ( A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
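The read_in_q_k_v step above slices PyTorch's fused in_proj matrix into separate query/key/value weights; here is that slice in isolation for the 256-dimensional hidden size the row assumes.
import torch
hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)  # fused [q; k; v], as in nn.MultiheadAttention
in_proj_bias = torch.randn(3 * hidden)
q_w, k_w, v_w = in_proj_weight[:hidden], in_proj_weight[hidden : 2 * hidden], in_proj_weight[-hidden:]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden : 2 * hidden], in_proj_bias[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
assert q_b.shape == k_b.shape == v_b.shape == (hidden,)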
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
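# Minimal standalone sketch of the API exercised above (network access assumed;
# repo id taken from the constants at the top of the test):
#
#   from transformers.utils import cached_file
#   config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#   # -> a path under TRANSFORMERS_CACHE/models--hf-internal-testing--tiny-random-bert/snapshots/<commit>/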
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first backtracking: fix one element per level, marking used indices.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
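# The backtracking above prints each of the n! orderings exactly once, so the
# overall work is O(n * n!). Quick cross-check against the standard library
# (sketch, relying only on itertools):
#
#   from itertools import permutations
#   assert len(list(permutations([3, 1, 2, 4]))) == 24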
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = BertGenerationTokenizer
lowercase__ : Optional[int] = False
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self ) -> int:
super().setUp()
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [1_85_36, 22_60, 1_01]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = BertGenerationConfig()
lowerCAmelCase__ = BertGenerationEncoder(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
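# Quick sanity sketch for the tokenizer under test (downloads the public
# checkpoint; the expected ids come from the integration test above):
#
#   from transformers import BertGenerationTokenizer
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   assert tok.encode("Hello World!") == [18536, 2260, 101]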
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        # A multiple of 15 already satisfies a % 3 == 0, so no separate
        # inclusion-exclusion branch is needed when summing directly.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
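# Closed-form alternative (sketch): inclusion-exclusion over arithmetic series,
# O(1) instead of the O(n) loop above.
def solution_closed_form(n: int = 1000) -> int:
    def sum_of_multiples(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return sum_of_multiples(3) + sum_of_multiples(5) - sum_of_multiples(15)


# Both agree for the default input: solution() == solution_closed_form() == 233168.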
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_bigbird_pegasus': [
        'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BigBirdPegasusConfig',
        'BigBirdPegasusOnnxConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
        'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BigBirdPegasusForCausalLM',
        'BigBirdPegasusForConditionalGeneration',
        'BigBirdPegasusForQuestionAnswering',
        'BigBirdPegasusForSequenceClassification',
        'BigBirdPegasusModel',
        'BigBirdPegasusPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
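# Effect of the pattern above (sketch): with the module object swapped for a
# _LazyModule in sys.modules, an import such as
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig
#
# resolves through _import_structure and only loads the heavy submodule on first
# attribute access, so the top-level `import transformers` stays cheap and a
# missing optional backend (torch here) fails lazily instead of at import time.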
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCamelCase_ = 250004
UpperCamelCase_ = 250020
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = MBartaaTokenizer
lowerCamelCase_ = MBartaaTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : List[Any] =MBartaaTokenizer(UpperCAmelCase__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[Any] ='''<s>'''
lowercase : Dict =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Tuple =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1054 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =MBartaaTokenizer(UpperCAmelCase__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=UpperCAmelCase__ )
lowercase : List[Any] =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : Dict =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
lowercase : Tuple =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase : Optional[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# fmt: off
lowercase : Optional[int] ={'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase : List[str] =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Tuple =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Union[str, Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Dict =tempfile.mkdtemp()
lowercase : Dict =tokenizer_r.save_pretrained(UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase : Dict =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowercase : Dict =tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowercase : Tuple =tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=True
lowercase : List[str] =tempfile.mkdtemp()
lowercase : Optional[int] =tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowercase : Dict =tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowercase : Optional[int] =tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=False
lowercase : Optional[Any] =tempfile.mkdtemp()
lowercase : Any =tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowercase : Any =tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase : Dict =tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowercase : Any =tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'facebook/mbart-large-50-one-to-many-mmt'
lowerCamelCase_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCamelCase_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCamelCase_ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def lowerCamelCase_ ( cls : Any ):
'''simple docstring'''
lowercase : MBartaaTokenizer =MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowercase : Tuple =1
return cls
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Tuple =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
lowercase : List[Any] =[RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowercase : Union[str, Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : int =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , UpperCAmelCase__ )
lowercase : str =10
lowercase : Tuple =self.tokenizer(UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ ).input_ids[0]
self.assertEqual(ids[0] , UpperCAmelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : int =tempfile.mkdtemp()
lowercase : List[str] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase : List[str] =MBartaaTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase__ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
lowercase : Optional[int] =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase : Dict =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowercase : Union[str, Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer(self.src_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=3 , return_tensors='''pt''' )
lowercase : Dict =self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=10 , return_tensors='''pt''' )
lowercase : Optional[int] =targets['''input_ids''']
lowercase : int =shift_tokens_right(UpperCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
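# Design note: BetterTransformer fuses layers in ways that cannot be serialized,
# so save_pretrained() is expected to refuse until reverse_bettertransformer()
# restores the canonical modules. Minimal usage sketch of the optimum
# integration exercised above:
#
#   model = model.to_bettertransformer()        # fast fused inference path
#   out = model.generate(**inputs)
#   model = model.reverse_bettertransformer()   # restore before saving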
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
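# Inference sketch with the public checkpoint pinned by the integration test:
#
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1000)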
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    # Slide the window: drop the element leaving on the left, add the one
    # entering on the right, and keep the best sum seen so far.
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            # `GenerationConfig` values are not JSON-serializable as-is.
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
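# Hypothetical instantiation sketch (field names are the ones declared above;
# output_dir comes from the TrainingArguments base class):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()  # nested GenerationConfig serialized via the override above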
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt
    (Project Euler problem 13)."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
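# Python's arbitrary-precision integers make this exact: summing one hundred
# 50-digit numbers needs no special handling, and slicing str(total)[:10] keeps
# the leading digits. Equivalent one-liner sketch (same num.txt assumption):
#
#   str(sum(map(int, open("num.txt"))))[:10]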
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self : Optional[int] , __snake_case : int , __snake_case : str=1_4 , __snake_case : Any=7 , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : int=9_9 , __snake_case : Tuple=3_2 , __snake_case : List[str]=5 , __snake_case : Optional[Any]=4 , __snake_case : Optional[int]=3_7 , __snake_case : Optional[int]="gelu" , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=5_1_2 , __snake_case : List[Any]=1_6 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : int=4 , __snake_case : Dict=None , ) -> List[str]:
__magic_name__: Tuple = parent
__magic_name__: int = batch_size
__magic_name__: str = seq_length
__magic_name__: Optional[int] = is_training
__magic_name__: Dict = use_token_type_ids
__magic_name__: str = use_input_mask
__magic_name__: int = use_labels
__magic_name__: Union[str, Any] = use_mc_token_ids
__magic_name__: str = vocab_size
__magic_name__: List[str] = hidden_size
__magic_name__: Any = num_hidden_layers
__magic_name__: int = num_attention_heads
__magic_name__: Dict = intermediate_size
__magic_name__: Optional[Any] = hidden_act
__magic_name__: Any = hidden_dropout_prob
__magic_name__: Dict = attention_probs_dropout_prob
__magic_name__: List[Any] = max_position_embeddings
__magic_name__: List[str] = type_vocab_size
__magic_name__: int = type_sequence_label_size
__magic_name__: Optional[int] = initializer_range
__magic_name__: Optional[int] = num_labels
__magic_name__: str = num_choices
__magic_name__: Optional[Any] = scope
__magic_name__: Union[str, Any] = self.vocab_size - 1
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__: Dict = None
if self.use_input_mask:
__magic_name__: Any = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__: List[Any] = None
if self.use_token_type_ids:
__magic_name__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__: Dict = None
if self.use_mc_token_ids:
__magic_name__: Any = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__magic_name__: List[Any] = None
__magic_name__: Any = None
__magic_name__: List[Any] = None
if self.use_labels:
__magic_name__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__: Dict = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__: Tuple = self.get_config()
__magic_name__: Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCamelCase__ ( self : Any , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] , *__snake_case : Dict ) -> Any:
__magic_name__: Any = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
model(__snake_case , token_type_ids=__snake_case )
__magic_name__: List[Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : str , __snake_case : int , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Dict , *__snake_case : Any ) -> Union[str, Any]:
__magic_name__: List[Any] = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
__magic_name__: List[str] = self.prepare_config_and_inputs()
(
(
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
),
): int = config_and_inputs
__magic_name__: int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : str , __snake_case : str , __snake_case : Optional[int] , *__snake_case : Union[str, Any] ) -> str:
__magic_name__: int = self.num_labels
__magic_name__: int = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: int = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] ) -> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: str = CTRLModelTester(self )
__magic_name__: Optional[int] = ConfigTester(self , config_class=__snake_case , n_embd=3_7 )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
__magic_name__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
pass
@slow
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: Union[str, Any] = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
pass
@require_torch
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=torch_device ) # Legal the president is
        expected_output_ids = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
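# A framework-free sketch of the property the integration test above asserts:
# with greedy decoding (do_sample=False), the returned sequence begins with the
# prompt ids and extends them deterministically. Toy lists stand in for the
# model output here; no GPU or checkpoint is needed.
prompt_ids = [1_1_8_5_9, 0, 1_6_1_1, 8]
generated_ids = [1_1_8_5_9, 0, 1_6_1_1, 8, 5, 1_5_0, 2_6_4_4_9]  # toy continuation
assert generated_ids[: len(prompt_ids)] == prompt_ids  # the prompt is preserved as a prefix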
| 96 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"{script_name}.py"
    with open(script_path , "w" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path ) | 16 | 0 |
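# A self-contained sketch of the directory layout the `dataset_loading_script_dir`
# fixture above produces: datasets/<script_name>/<script_name>.py under a temporary
# directory. `tempfile` stands in for pytest's `tmp_path` here; names are illustrative.
import os
import tempfile


def write_loading_script(tmp_dir: str, script_name: str, script_code: str) -> str:
    script_dir = os.path.join(tmp_dir, "datasets", script_name)
    os.makedirs(script_dir)
    script_path = os.path.join(script_dir, f"{script_name}.py")
    with open(script_path, "w") as f:
        f.write(script_code)
    return script_path


with tempfile.TemporaryDirectory() as tmp_dir:
    path = write_loading_script(tmp_dir, "__dummy_dataset1__", "# dataset script body")
    assert os.path.isfile(path)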
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_3 , SCREAMING_SNAKE_CASE_ : int=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE_ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Dict=3_7 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : Any=1_6 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : int=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : str=None , ) -> List[Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _lowercase ( self : Optional[int] ) -> Optional[int]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Tuple ) -> List[Any]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = MegatronBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
lowercase_ = MegatronBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
lowercase_ = MegatronBertForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
lowercase_ = MegatronBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
lowercase_ = MegatronBertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
lowercase_ = MegatronBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = MegatronBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
lowercase_ = self.num_labels
lowercase_ = MegatronBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
lowercase_ = self.num_choices
lowercase_ = MegatronBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Any ) -> Dict:
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a :str = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a :Tuple = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a :List[str] = True
# test_resize_embeddings = False
a :str = False
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> int:
lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowercase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def _lowercase ( self : Tuple ) -> List[Any]:
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=3_7 )
def _lowercase ( self : int ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Any:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] ) -> str:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def _long_tensor( snake_case__: Any ):
    '''simple docstring'''
    return torch.tensor(
        snake_case__ , dtype=torch.long , device=torch_device , )
__a = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def _lowercase ( self : Union[str, Any] ) -> Dict:
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_0_2_4) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=__a , abs_tol=__a ) , msg=msg )
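# A hedged sketch of the `ids_tensor` helper imported above from
# test_modeling_common (an illustration, not the library's exact code): it
# draws random token ids uniformly in [0, vocab_size) with the requested shape.
import torch


def ids_tensor_sketch(shape, vocab_size):
    # torch.randint's `high` bound is exclusive, so every id is < vocab_size
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)


print(ids_tensor_sketch([2, 7], vocab_size=9_9).shape)  # torch.Size([2, 7])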
| 97 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
'''simple docstring'''
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts." )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )

    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"] | 16 | 0 |
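# A self-contained sketch of the span-selection idea in `_get_best_spans` above:
# score every (start, end) pair within `max_answer_length` as start_logit +
# end_logit, sort by score, and keep a span unless it contains (or is contained
# in) an already chosen one. Toy logits only; no tokenizer or model required.
def best_spans_sketch(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_logit in enumerate(start_logits):
        for length, e_logit in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_logit + e_logit))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # reject only containment, mirroring the method above (partial overlaps pass)
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


print(best_spans_sketch([0.1, 2.0, 0.3, 0.0], [0.0, 0.5, 1.5, 0.2]))  # [(1, 2), (0, 1)]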
'''simple docstring'''
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4, 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2 )
    qc_ha.cx(1, 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0 )  # extract XOR value
    qc_ha.measure(3, 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, backend, shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"""Half Adder Output Qubit Counts: {counts}""")
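    # A classical cross-check of the circuit's semantics (no backend needed):
    # clbit 0 holds XOR(bit0, bit1) and clbit 1 holds AND(bit0, bit1), and
    # qiskit renders count keys with the highest classical bit on the left, so
    # the dominant key for inputs (b0, b1) should be f"{b0 & b1}{b0 ^ b1}".
    for b0 in (0, 1):
        for b1 in (0, 1):
            print(b0, b1, "->", f"{b0 & b1}{b0 ^ b1}")  # e.g. (1, 1) -> "10"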
| 98 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )


def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 16 | 0 |
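# A short numerical sketch of the min-max property behind the functions above:
# for a Hermitian matrix, the Rayleigh quotient of any nonzero vector lies
# between the smallest and largest eigenvalues (numpy only; toy data).
import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1.0], [2.0], [3.0]])
num = (v.conjugate().T @ a @ v).item()
den = (v.conjugate().T @ v).item()
rq = (num / den).real
eigs = np.linalg.eigvalsh(a)  # eigenvalues of a Hermitian matrix are real
assert eigs.min() - 1e-9 <= rq <= eigs.max() + 1e-9
print(rq, eigs.min(), eigs.max())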
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
SCREAMING_SNAKE_CASE = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__A )
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """rag"""
_lowerCamelCase = True
def __init__( self , __A=None , __A=True , __A=None , __A=None , __A=None , __A=None , __A=None , __A=" / " , __A=" // " , __A=5 , __A=300 , __A=768 , __A=8 , __A="wiki_dpr" , __A="train" , __A="compressed" , __A=None , __A=None , __A=False , __A=False , __A=0.0 , __A=True , __A=False , __A=False , __A=False , __A=True , __A=None , **__A , ):
super().__init__(
bos_token_id=__A , pad_token_id=__A , eos_token_id=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , is_encoder_decoder=__A , prefix=__A , vocab_size=__A , **__A , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__a = kwargs.pop("""question_encoder""" )
__a = question_encoder_config.pop("""model_type""" )
__a = kwargs.pop("""generator""" )
__a = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__a = AutoConfig.for_model(__A , **__A )
__a = AutoConfig.for_model(__A , **__A )
__a = reduce_loss
__a = label_smoothing
__a = exclude_bos_score
__a = do_marginalize
__a = title_sep
__a = doc_sep
__a = n_docs
__a = max_combined_length
__a = dataset
__a = dataset_split
__a = index_name
__a = retrieval_vector_size
__a = retrieval_batch_size
__a = passages_path
__a = index_path
__a = use_dummy_dataset
__a = output_retrieved
__a = do_deduplication
__a = use_cache
if self.forced_eos_token_id is None:
__a = getattr(self.generator , """forced_eos_token_id""" , __A )
@classmethod
def snake_case_ ( cls , __A , __A , **__A ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__A )
def snake_case_ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.question_encoder.to_dict()
__a = self.generator.to_dict()
__a = self.__class__.model_type
return output
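# A minimal, framework-free sketch of the composite-config pattern used above:
# a parent config holds two child configs and serializes by recursively calling
# the children's to_dict(). Class names here are illustrative stand-ins, not
# the transformers API.
import copy


class ChildConfigSketch:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfigSketch:
    model_type = 'composite'

    def __init__(self, question_encoder, generator, n_docs=5):
        self.question_encoder = question_encoder
        self.generator = generator
        self.n_docs = n_docs

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


cfg = CompositeConfigSketch(ChildConfigSketch(model_type='dpr'), ChildConfigSketch(model_type='bart'))
print(cfg.to_dict()['generator']['model_type'])  # bart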
| 99 |
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : float , parent : Node | None , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic( self ) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , other : Node ) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search( self ) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent : Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node : Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem) | 16 | 0 |
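# Design note as a sketch: GreedyBestFirst above orders its frontier purely by
# the Manhattan heuristic (f_cost == h), whereas A* would order by g_cost + h.
# The one-line difference can be shown without the grid machinery:
def greedy_priority(g_cost: float, heuristic: float) -> float:
    return heuristic  # greedy best-first ignores the path cost accumulated so far


def a_star_priority(g_cost: float, heuristic: float) -> float:
    return g_cost + heuristic  # A* trades off accumulated cost and the heuristic


assert greedy_priority(10, 3) == 3 and a_star_priority(10, 3) == 13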
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_A : str = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_A : List[Any] = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_A : Optional[int] = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_A : List[Any] = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_A : List[str] = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_A : Union[str, Any] = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_A : List[str] = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def __snake_case ( ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = randrange(len(lowerCAmelCase_ ) ), randrange(len(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __snake_case ( lowerCAmelCase_ = 1_0_0 ) -> List[Any]:
return (generate_random_hand() for _ in range(lowerCAmelCase_ ))
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
assert PokerHand(lowerCAmelCase_ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
assert PokerHand(lowerCAmelCase_ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = PokerHand(lowerCAmelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
assert PokerHand(lowerCAmelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
assert PokerHand(lowerCAmelCase_ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , lowerCAmelCase_ )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
assert PokerHand(lowerCAmelCase_ ).compare_with(PokerHand(lowerCAmelCase_ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
assert PokerHand(lowerCAmelCase_ ).compare_with(PokerHand(lowerCAmelCase_ ) ) == expected
def __snake_case ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = [PokerHand(lowerCAmelCase_ ) for hand in SORTED_HANDS]
SCREAMING_SNAKE_CASE__ = poker_hands.copy()
shuffle(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = chain(sorted(lowerCAmelCase_ ) )
for index, hand in enumerate(lowerCAmelCase_ ):
assert hand == poker_hands[index]
def __snake_case ( ) -> Tuple:
# Test that five high straights are compared correctly.
SCREAMING_SNAKE_CASE__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=lowerCAmelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __snake_case ( ) -> Tuple:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
SCREAMING_SNAKE_CASE__ = PokerHand('''2C 4S AS 3D 5C''' )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __snake_case ( ) -> str:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = os.path.join(lowerCAmelCase_ , '''poker_hands.txt''' )
with open(lowerCAmelCase_ ) as file_hand:
for line in file_hand:
SCREAMING_SNAKE_CASE__ = line[:1_4].strip()
SCREAMING_SNAKE_CASE__ = line[1_5:].strip()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PokerHand(lowerCAmelCase_ ), PokerHand(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = player.compare_with(lowerCAmelCase_ )
if output == "Win":
answer += 1
assert answer == 3_7_6
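# `PokerHand` itself lives in a separate module (`.sola`), so here is a hedged
# sketch of what a flush check like `_is_flush` plausibly does: all five cards
# share one suit (the suit is the second character of each two-character card).
def is_flush_sketch(hand: str) -> bool:
    suits = {card[1] for card in hand.split()}
    return len(suits) == 1


assert is_flush_sketch("""2H 3H 4H 5H 6H""") is True
assert is_flush_sketch("""AS AH 2H AD AC""") is False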
| 100 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
__A : Dict = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
__A : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
__A : Any = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
__A : Optional[int] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
__A : List[str] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
__A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__A : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__A : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__A : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__A : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__A : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__A : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__A : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__A : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__A : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_MAPPING
__A : Optional[int] = auto_class_update(FlaxAutoModel)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__A : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__A : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__A : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__A : int = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__A : Optional[int] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__A : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__A : List[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__A : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__A : Union[str, Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A : Optional[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__A : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__A : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
) | 16 | 0 |
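# A minimal sketch of how the Flax auto mappings above are consumed: the auto
# class resolves the config type to the matching Flax head class. Assumes
# `transformers` with Flax extras installed; the checkpoint name is illustrative.
from transformers import AutoTokenizer, FlaxAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")  # via the masked-LM mapping
inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits  # (batch, seq_len, vocab_size)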
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = ["""pixel_values"""]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'shortest_edge': 3_8_4}
SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
SCREAMING_SNAKE_CASE_ : Dict = resample
SCREAMING_SNAKE_CASE_ : int = do_rescale
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ : List[Any] = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE_ : Tuple = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE_ : List[str] = get_resize_output_image_size(lowerCAmelCase__ , size=lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCAmelCase__ , size=(shortest_edge, shortest_edge) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCAmelCase__ , size=(shortest_edge, shortest_edge) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Any = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Optional[int] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : List[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , crop_pct=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : int = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : str = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : int = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
| 101 |
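# A standalone sketch of the resize rule implemented above: below 384 px the
# short side is scaled to shortest_edge / crop_pct and then center-cropped back
# to shortest_edge; at 384 px and above the image is warped directly. The helper
# name is mine, for illustration only.
def target_short_side(shortest_edge: int, crop_pct: float) -> int:
    if shortest_edge < 384:
        return int(shortest_edge / crop_pct)  # enlarge before the center crop
    return shortest_edge  # warp directly, no crop

# with the class default crop_pct = 224 / 256, a 224-px target resizes to 256 first
assert target_short_side(224, 224 / 256) == 256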
def __a ( A__ : float , A__ : float ):
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(A__ ) * abs(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 16 | 0 |
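# Quick check of the kinetic-energy helper above (KE = 0.5 * m * v**2); abs()
# makes the sign of the velocity irrelevant. `__a` is the obfuscated name the
# snippet defines the function under.
assert __a(10, 5) == 125.0
assert __a(10, -5) == __a(10, 5)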
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A , _A = None , _A = None , _A = False , **_A , ):
'''simple docstring'''
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
UpperCamelCase : List[Any] = Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : Dict = None
UpperCamelCase : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
UpperCamelCase : Optional[int] = self.builder.as_dataset(
split="""train""" , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A , _A , _A = None , _A = None , **_A , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
UpperCamelCase : Tuple = dataset
UpperCamelCase : Dict = name
UpperCamelCase : Optional[int] = con
UpperCamelCase : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase : Optional[int] = num_proc
UpperCamelCase : Tuple = to_sql_kwargs
def _a ( self ):
'''simple docstring'''
UpperCamelCase : int = self.to_sql_kwargs.pop("""sql""" , _A )
UpperCamelCase : Optional[Any] = self.to_sql_kwargs.pop("""con""" , _A )
UpperCamelCase : List[Any] = self.to_sql_kwargs.pop("""index""" , _A )
UpperCamelCase : int = self._write(index=_A , **self.to_sql_kwargs )
return written
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = args
UpperCamelCase : Tuple = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
UpperCamelCase : Any = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase : List[str] = batch.to_pandas()
UpperCamelCase : List[str] = df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def _a ( self , _A , **_A ):
'''simple docstring'''
UpperCamelCase : List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCamelCase , UpperCamelCase : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 102 |
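# The writer above slices the table into `batch_size` rows and appends each chunk
# through pandas' to_sql. A minimal sketch of that batched-append pattern with
# plain pandas + sqlite3 (table and column names are illustrative):
import sqlite3

import pandas as pd

con = sqlite3.connect(":memory:")
df = pd.DataFrame({"a": range(10)})
batch_size = 4
for offset in range(0, len(df), batch_size):
    chunk = df.iloc[offset : offset + batch_size]
    # the first batch creates the table; every later batch must append
    chunk.to_sql("demo", con, index=False, if_exists="append" if offset > 0 else "replace")
assert con.execute("SELECT COUNT(*) FROM demo").fetchone()[0] == 10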
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A : Dict = logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : List[Any] , **__lowerCamelCase : Any ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE = deprecated_arg[3:]
SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase )
logger.warning(
f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}" )
SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name )
SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx )
SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode )
SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Name of TPU"} , )
lowerCamelCase__ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _snake_case ( self : Optional[int] ):
requires_backends(self , ["tf"] )
SCREAMING_SNAKE_CASE = None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE = None
return tpu
@cached_property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" )
return strategy
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def _snake_case ( self : Optional[Any] ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def _snake_case ( self : List[str] ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _snake_case ( self : Dict ):
return self.n_gpu > 0 | 16 | 0 |
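# A hedged standalone sketch of the device-selection logic above: use a single
# GPU when one is visible, otherwise hide GPUs and fall back to the CPU.
import tensorflow as tf

if tf.config.list_physical_devices("GPU"):
    strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
    tf.config.set_visible_devices([], "GPU")  # disable GPU, as in the fallback above
    strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")

with strategy.scope():
    pass  # build the model to be benchmarked here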
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
snake_case = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
snake_case = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
snake_case = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_snake_case = 0.0
for i, j in zip(__lowerCamelCase , __lowerCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(__lowerCamelCase , __lowerCamelCase ) else 0.0
_snake_case = n_correct / len(__lowerCamelCase )
return {
"accuracy": accuracy,
}
| 103 |
from collections.abc import Callable
import numpy as np
def __a ( A__ : Callable , A__ : float , A__ : float , A__ : float , A__ : float ):
SCREAMING_SNAKE_CASE = int(np.ceil((x_end - xa) / step_size ) )
SCREAMING_SNAKE_CASE = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE = ya
SCREAMING_SNAKE_CASE = xa
for k in range(A__ ):
SCREAMING_SNAKE_CASE = y[k] + step_size * ode_func(A__ , y[k] )
SCREAMING_SNAKE_CASE = y[k] + (
(step_size / 2) * (ode_func(A__ , y[k] ) + ode_func(x + step_size , A__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
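# De-obfuscated reference version of the integrator above (Heun's method, the
# explicit trapezoidal rule). Names are mine; the arithmetic matches the snippet:
# an Euler predictor followed by a trapezoidal corrector.
import numpy as np

def heun(ode_func, ya, xa, step_size, x_end):
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros(n + 1)
    y[0], x = ya, xa
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)  # corrector
        )
        x += step_size
    return y

# y' = y with y(0) = 1 on [0, 1]; the endpoint should approximate e
assert abs(heun(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1] - np.e) < 1e-3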
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Optional[Any] = "falcon"
A__ : Optional[int] = ["past_key_values"]
def __init__( self , SCREAMING_SNAKE_CASE__=65024 , SCREAMING_SNAKE_CASE__=4544 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=71 , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=11 , SCREAMING_SNAKE_CASE__=11 , **SCREAMING_SNAKE_CASE__ , ) -> Any:
A__ = vocab_size
# Backward compatibility with n_embed kwarg
A__ = kwargs.pop("n_embed" , SCREAMING_SNAKE_CASE__ )
A__ = hidden_size if n_embed is None else n_embed
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = use_cache
A__ = hidden_dropout
A__ = attention_dropout
A__ = bos_token_id
A__ = eos_token_id
A__ = num_attention_heads if num_kv_heads is None else num_kv_heads
A__ = alibi
A__ = new_decoder_architecture
A__ = multi_query # Ignored when new_decoder_architecture is True
A__ = parallel_attn
A__ = bias
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def snake_case__ ( self ) -> Tuple:
return self.hidden_size // self.num_attention_heads
@property
def snake_case__ ( self ) -> int:
return not self.alibi
| 104 |
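# Sanity check on the Falcon defaults above: 4544 hidden dims over 71 attention
# heads gives exactly the 64-dim head size the `head_dim` property computes.
assert 4544 // 71 == 64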
def __a ( A__ : int ):
if not isinstance(A__ , A__ ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
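# An equivalent O(sqrt(n)) take on the proper-divisor sum above (my sketch, not
# from the source): each divisor d <= sqrt(n) is paired with its cofactor n // d.
def sum_proper_divisors(n: int) -> int:
    total, d = 0, 1
    while d * d <= n:
        if n % d == 0:
            if d != n:  # never count n itself (only matters for n == 1)
                total += d
            cofactor = n // d
            if cofactor != d and cofactor != n:
                total += cofactor
        d += 1
    return total

assert sum_proper_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14: a perfect number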
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE_ : Tuple = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE_ : List[str] = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.post_net.linear.bias']
return model
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE_ : str = downstream_dict['model.linear.bias']
return model
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE_ : Any = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE_ : Any = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
SCREAMING_SNAKE_CASE_ : Dict = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE_ : str = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE_ : Optional[Any] = downstream_dict['objective.W']
return model
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowerCamelCase_ , map_location='cpu' )
SCREAMING_SNAKE_CASE_ : Tuple = checkpoint['Downstream']
SCREAMING_SNAKE_CASE_ : List[Any] = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('ForAudioFrameClassification' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('ForXVector' ):
SCREAMING_SNAKE_CASE_ : Any = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE_ : str = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
UpperCamelCase__ : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 105 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__A : List[Any] = {'UserAgent': UserAgent().random}
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = script.contents[0]
SCREAMING_SNAKE_CASE = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = f"https://www.instagram.com/{username}/"
SCREAMING_SNAKE_CASE = self.get_json()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__lowerCamelCase ).text
SCREAMING_SNAKE_CASE = BeautifulSoup(__lowerCamelCase , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["username"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["full_name"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["biography"]
@property
def _snake_case ( self : Tuple ):
return self.user_data["business_email"]
@property
def _snake_case ( self : Optional[Any] ):
return self.user_data["external_url"]
@property
def _snake_case ( self : int ):
return self.user_data["edge_followed_by"]["count"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["edge_follow"]["count"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _snake_case ( self : Any ):
return self.user_data["profile_pic_url_hd"]
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["is_verified"]
@property
def _snake_case ( self : Dict ):
return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }') | 16 | 0 |
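# Sketch of the script-tag parsing done in extract_user_profile above: the tag
# body holds "window._sharedData = {...};", and the JSON is sliced out between
# the first '{"config"' and the trailing ';' (the payload below is a stub, not
# real Instagram data).
import json

script_body = (
    'window._sharedData = {"config": {}, "entry_data": '
    '{"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
)
data = json.loads(script_body[script_body.find('{"config"') : -1])
assert data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"] == "github"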
import random
from typing import Any
def lowerCamelCase_ ( lowerCAmelCase__ : list ) -> list[Any]:
'''simple docstring'''
for _ in range(len(lowerCAmelCase__ ) ):
A = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
A = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
A , A = data[b], data[a]
return data
if __name__ == "__main__":
__snake_case : List[str] = [0, 1, 2, 3, 4, 5, 6, 7]
__snake_case : List[Any] = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 106 |
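# Note: the shuffle above swaps two independently chosen random positions per
# pass, which is not the textbook Fisher-Yates step. The classic variant below
# (a hedged reference sketch) swaps index i with a random j <= i and is the one
# with a uniformity guarantee.
import random

def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data

print('Classic FY', fisher_yates(list(range(8))))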
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Any = logging.get_logger(__name__)
__A : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Optional[Any] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : str ):
return len(self.encoder )
def _snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Tuple , __lowerCamelCase : Dict ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : int , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 16 | 0 |
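# Worked example of the symbol-pair extraction the BPE merge loop above is
# driven by (a standalone re-implementation of get_pairs, for illustration only):
def get_pairs(word: tuple) -> set:
    pairs, prev = set(), word[0]
    for char in word[1:]:
        pairs.add((prev, char))
        prev = char
    return pairs

assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}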
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : int = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = "efficientnet"
def __init__( self : List[Any], UpperCamelCase__ : int = 3, UpperCamelCase__ : int = 6_00, UpperCamelCase__ : float = 2.0, UpperCamelCase__ : float = 3.1, UpperCamelCase__ : int = 8, UpperCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3], UpperCamelCase__ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92], UpperCamelCase__ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20], UpperCamelCase__ : List[int] = [], UpperCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1], UpperCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1], UpperCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6], UpperCamelCase__ : float = 0.25, UpperCamelCase__ : str = "swish", UpperCamelCase__ : int = 25_60, UpperCamelCase__ : str = "mean", UpperCamelCase__ : float = 0.02, UpperCamelCase__ : float = 0.001, UpperCamelCase__ : float = 0.99, UpperCamelCase__ : float = 0.5, UpperCamelCase__ : float = 0.2, **UpperCamelCase__ : List[str], ) -> int:
super().__init__(**UpperCamelCase__ )
_A = num_channels
_A = image_size
_A = width_coefficient
_A = depth_coefficient
_A = depth_divisor
_A = kernel_sizes
_A = in_channels
_A = out_channels
_A = depthwise_padding
_A = strides
_A = num_block_repeats
_A = expand_ratios
_A = squeeze_expansion_ratio
_A = hidden_act
_A = hidden_dim
_A = pooling_type
_A = initializer_range
_A = batch_norm_eps
_A = batch_norm_momentum
_A = dropout_rate
_A = drop_connect_rate
_A = sum(UpperCamelCase__ ) * 4
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = version.parse("1.11" )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : str ) -> float:
return 1e-5
| 107 |
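# Sanity check on the EfficientNet defaults above: num_hidden_layers is derived
# as sum(num_block_repeats) * 4 = (1 + 2 + 2 + 3 + 3 + 4 + 1) * 4.
assert sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64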
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( A__ : str , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : List[Any] , A__ : Optional[int]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = result.headers["Location"]
SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" )
with open(A__ , "wb" ) as fp:
fp.write(response.content )
def __a ( A__ : List[Any] , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE = line[: line.index(": " )]
SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE = line[len("FAILED " ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE = line
if len(A__ ) != len(A__ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` "
F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
SCREAMING_SNAKE_CASE = None
if job_name and job_links:
SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def __a ( A__ : List[str] , A__ : Tuple=None ):
SCREAMING_SNAKE_CASE = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def __a ( A__ : str ):
SCREAMING_SNAKE_CASE = test.split("::" )[0]
if test.startswith("tests/models/" ):
SCREAMING_SNAKE_CASE = test.split("/" )[2]
else:
SCREAMING_SNAKE_CASE = None
return test
def __a ( A__ : List[str] , A__ : Dict=None ):
SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE = {x[2] for x in logs}
SCREAMING_SNAKE_CASE = {}
for test in tests:
SCREAMING_SNAKE_CASE = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts}
SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def __a ( A__ : Dict ):
SCREAMING_SNAKE_CASE = "| no. | error | status |"
SCREAMING_SNAKE_CASE = "|-:|:-|:-|"
SCREAMING_SNAKE_CASE = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"]
SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |"
lines.append(A__ )
return "\n".join(A__ )
def __a ( A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = "| model | no. of errors | major error | count |"
SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|"
SCREAMING_SNAKE_CASE = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0]
SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A : int = get_job_links(args.workflow_run_id, token=args.token)
__A : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A : Union[str, Any] = k.find(' / ')
__A : Optional[int] = k[index + len(' / ') :]
__A : Optional[int] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A : Optional[Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A : str = reduce_by_error(errors)
__A : int = reduce_by_model(errors)
__A : Any = make_github_table(reduced_by_error)
__A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 16 | 0 |
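# Tiny illustration of the per-error markdown rows make_github_table above
# produces (the error stats here are made up):
reduced_by_error = {"AssertionError: torch not available": {"count": 3, "failed_tests": []}}
lines = ["| no. | error | status |", "|-:|:-|:-|"]
for error in reduced_by_error:
    lines.append(f"| {reduced_by_error[error]['count']} | {error[:100]} | |")
print("\n".join(lines))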
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR the ciphertext with the cycled key; return None on any invalid character."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep the decodings that survive."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Narrow the candidates down with common English words and sum the plaintext."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }") | 108 |
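A small round-trip check of the XOR helper above; the key and message here are invented for illustration, not taken from the puzzle's data file:

# Encrypting with a known 3-letter key and decoding with try_key should
# return the original text unchanged.
message = "the quick brown fox"
key = tuple(ord(c) for c in "abc")
ciphertext = [ord(ch) ^ k for ch, k in zip(message, cycle(key))]
assert try_key(key, ciphertext) == message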
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64,
                 intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000,
                 attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048,
                 initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2,
                 tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
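A minimal, illustrative instantiation exercising the validation above; the field values are made up:

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}
# A malformed dict, e.g. {"type": "cubic", "factor": 2.0}, raises the ValueError above.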
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `steps` rounds of the Koch refinement to the starting path."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment of the path with the four segments of the Koch motif."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the path described by the list of 2D vectors."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 109 |
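An illustrative invariant of `iteration_step`: every refinement replaces each segment with four, so a path of n points becomes one of 4 * (n - 1) + 1 points.

refined = iterate(INITIAL_VECTORS, 1)
assert len(refined) == 4 * (len(INITIAL_VECTORS) - 1) + 1  # 13 points after one step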
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
| 110 |
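A hedged usage sketch of the pipeline defined above. The checkpoint name is illustrative (any image-captioning checkpoint on the Hub should work), the call needs network access, and the exact caption will vary:

from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")  # illustrative checkpoint
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# e.g. [{'generated_text': 'two cats sleeping on a pink blanket'}]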
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the "conditional_detr" prefix and re-add it with ".model"
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
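A toy, checkpoint-free illustration of the `rename_key` helper above:

toy_state_dict = {"transformer.decoder.norm.weight": 1.0}
rename_key(toy_state_dict, "transformer.decoder.norm.weight", "decoder.layernorm.weight")
assert toy_state_dict == {"decoder.layernorm.weight": 1.0}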
import os
def solution():
    """Return the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 529 |
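The worked example from the problem statement, checked directly: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in sorted order it contributes 938 * 53 = 49714.

assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714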
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree with DFS, printing
    every complete permutation of `sequence`."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2) | 16 | 0 |
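A small cross-check against the standard library (illustrative): the backtracking above prints exactly the n! orderings that itertools.permutations enumerates.

from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # matches the 4! lines printed above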
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
                 decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
                 decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True,
                 layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 340 |
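A minimal usage sketch showing the `attribute_map` in action (assumes a transformers version exposing TrOCRConfig with these fields):

config = TrOCRConfig(decoder_layers=6)
assert config.num_hidden_layers == 6  # resolved through attribute_map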
def solution(n: int = 1000) -> int:
    """Sum of all the multiples of 3 or 5 below n; a multiple of 15 already
    satisfies the first test, so it needs no separate branch."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }') | 16 | 0 |
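Sanity check from the problem statement: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23.

assert solution(10) == 23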
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot for the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number in lst (elements assumed distinct)."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
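Illustrative checks of the quickselect above (distinct elements assumed, since the partition drops duplicates of the pivot):

assert kth_number([2, 1, 3, 4, 5], 3) == 3  # third smallest
assert kth_number([9, 7], 1) == 7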
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname) | 16 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
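An illustrative call, solving for the force given plate area and separation; the printed value is approximate:

result = casimir_force(force=0, area=4, distance=0.03)
print(result)  # {'force': 6.4248189174864216e-21} (approximately)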
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000,
                 layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True],
            hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1,
            drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37,
            num_attention_heads=12, num_hidden_layers=12, )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 16 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char_1, char_2 in zip(string1, string2):
        if char_1 != char_2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 579 |
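A classic illustrative pair: "karolin" and "kathrin" differ in exactly three positions.

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("python", "python") == 0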
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize any nested GenerationConfig so the result is JSON-friendly.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 16 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}")
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 607 |
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution()) | 16 | 0 |
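The first-ten-digits trick in isolation: Python's arbitrary-precision integers keep the full sum exact, and slicing its string form keeps the leading digits (toy numbers, not the real num.txt data):

assert str(sum([10**12 + 1, 2 * 10**12 + 2]))[:10] == "3000000000"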
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1, ..., z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 175 |
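A quick round-trip check of the a=1, b=2, ... mapping:

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"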
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir) | 16 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
| 107 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs, ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts." )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4, ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int, ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
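# Typical usage of the reader tokenizer defined below (checkpoint name taken
# from the pretrained maps above; inputs are illustrative):
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(questions=["What is love?"], titles=["Haddaway"],
#                                texts=["'What Is Love' is a song..."], return_tensors="pt")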
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"] | 16 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    answer = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
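# Worked example: a 3x3 outer square with a 1x1 hole uses 3*3 - 1*1 = 8 tiles,
# the smallest lamina counted here. With the default limit of one million tiles
# this is Project Euler problem 173, whose published answer is 1572729.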
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
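# For Hermitian a, the Rayleigh quotient (v* a v) / (v* v) is always real and
# lies between the smallest and largest eigenvalues of a; e.g. a = identity
# yields 1 for every nonzero v.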
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 16 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """simple docstring"""
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
"""simple docstring"""
@register_to_config
    def __init__( self , num_attention_heads: int = 32 , attention_head_dim: int = 64 , num_layers: int = 20 , embedding_dim: int = 768 , num_embeddings=77 , additional_embeddings=4 , dropout: float = 0.0 , time_embed_act_fn: str = "silu" , norm_in_type: str = None , embedding_proj_norm_type: str = None , encoder_hid_proj_type: str = "linear" , added_emb_type: str = "prd" , time_embed_dim: int = None , embedding_proj_dim: int = None , clip_embed_dim: int = None , ) -> None:
super().__init__()
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = attention_head_dim
UpperCamelCase_ = num_attention_heads * attention_head_dim
UpperCamelCase_ = additional_embeddings
UpperCamelCase_ = time_embed_dim or inner_dim
UpperCamelCase_ = embedding_proj_dim or embedding_dim
UpperCamelCase_ = clip_embed_dim or embedding_dim
UpperCamelCase_ = Timesteps(__lowerCamelCase , __lowerCamelCase , 0 )
UpperCamelCase_ = TimestepEmbedding(__lowerCamelCase , __lowerCamelCase , out_dim=__lowerCamelCase , act_fn=__lowerCamelCase )
UpperCamelCase_ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
if embedding_proj_norm_type is None:
UpperCamelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCamelCase_ = nn.LayerNorm(__lowerCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
UpperCamelCase_ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
if encoder_hid_proj_type is None:
UpperCamelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCamelCase_ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowerCamelCase ) )
if added_emb_type == "prd":
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , 1 , __lowerCamelCase ) )
elif added_emb_type is None:
UpperCamelCase_ = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
UpperCamelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dropout=__lowerCamelCase , activation_fn='gelu' , attention_bias=__lowerCamelCase , )
for d in range(__lowerCamelCase )
] )
if norm_in_type == "layer":
UpperCamelCase_ = nn.LayerNorm(__lowerCamelCase )
elif norm_in_type is None:
UpperCamelCase_ = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
UpperCamelCase_ = nn.LayerNorm(__lowerCamelCase )
UpperCamelCase_ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCamelCase_ = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , __lowerCamelCase , persistent=__lowerCamelCase )
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) )
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , 'set_processor' ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor ) -> None:
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ) -> None:
        self.set_attn_processor(AttnProcessor() )
    def forward( self , hidden_states , timestep , proj_embedding , encoder_hidden_states = None , attention_mask = None , return_dict = True , ) -> PriorTransformerOutput:
UpperCamelCase_ = hidden_states.shape[0]
UpperCamelCase_ = timestep
if not torch.is_tensor(__lowerCamelCase ):
UpperCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowerCamelCase ) and len(timesteps.shape ) == 0:
UpperCamelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase_ = timesteps * torch.ones(__lowerCamelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase_ = self.time_proj(__lowerCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCamelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCamelCase_ = self.time_embedding(__lowerCamelCase )
if self.embedding_proj_norm is not None:
UpperCamelCase_ = self.embedding_proj_norm(__lowerCamelCase )
UpperCamelCase_ = self.embedding_proj(__lowerCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCamelCase_ = self.encoder_hidden_states_proj(__lowerCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
UpperCamelCase_ = self.proj_in(__lowerCamelCase )
UpperCamelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCamelCase_ = []
UpperCamelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowerCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCamelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCamelCase_ = hidden_states[:, None, :]
UpperCamelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCamelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(__lowerCamelCase , -1 , -1 )
additional_embeds.append(__lowerCamelCase )
UpperCamelCase_ = torch.cat(
__lowerCamelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCamelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCamelCase_ = F.pad(
__lowerCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCamelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCamelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCamelCase_ = F.pad(__lowerCamelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCamelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCamelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCamelCase_ = self.norm_in(__lowerCamelCase )
for block in self.transformer_blocks:
UpperCamelCase_ = block(__lowerCamelCase , attention_mask=__lowerCamelCase )
UpperCamelCase_ = self.norm_out(__lowerCamelCase )
if self.prd_embedding is not None:
UpperCamelCase_ = hidden_states[:, -1]
else:
UpperCamelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCamelCase_ = self.proj_to_clip_embeddings(__lowerCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowerCamelCase )
    def post_process_latents( self , prior_latents ):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 23 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , g_cost: float , parent: Node | None , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic( self ) -> float:
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
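    # Manhattan distance: admissible on a 4-connected grid with unit step
    # cost, e.g. from (0, 0) to (3, 2) the estimate is |3| + |2| = 5.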
    def __lt__( self , other ) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    '''simple docstring'''
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent: Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            # goal coordinates are passed as (goal_x, goal_y) to match Node's signature
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_x , self.target.pos_y , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node: Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem) | 16 | 0 |
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Optional[int] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__lowerCAmelCase : Tuple = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__lowerCAmelCase : Tuple = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
'''simple docstring'''
    def _info(self) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
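# Hand check of the module example above: the references "this is the reference"
# and "there is another one" total 8 words; the predictions incur 1 substitution
# plus 2 substitutions and 1 insertion, so WER = 4 / 8 = 0.5.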
| 529 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
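# `auto_class_update` only fills in docstrings and usage examples; calls stay
# the familiar auto-API pattern, e.g. FlaxAutoModel.from_pretrained("bert-base-cased").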
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
) | 16 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    '''simple docstring'''
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        '''simple docstring'''
        config_class = NewModelConfig
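# The config/model pair above mirrors how built-in architectures are wired up;
# `test_new_model_registration` below registers it with the auto classes so
# that "new-model" configs resolve to TFNewModel.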
@require_tf
class TFAutoModelTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_model_from_pretrained(self):
"""simple docstring"""
lowerCamelCase : Optional[int] = "bert-base-cased"
lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Optional[Any] = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_model_for_pretraining_from_pretrained(self):
"""simple docstring"""
lowerCamelCase : Dict = "bert-base-cased"
lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_model_for_causal_lm(self):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase )
lowerCamelCase , lowerCamelCase : Dict = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_lmhead_model_from_pretrained(self):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Optional[int] = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase )
lowerCamelCase , lowerCamelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_model_for_encoder_decoder_lm(self):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
lowerCamelCase , lowerCamelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_sequence_classification_model_from_pretrained(self):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : int = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
    def test_question_answering_model_from_pretrained(self):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase : int = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Dict = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
@require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase )
lowerCamelCase , lowerCamelCase : int = TFAutoModelForTableQuestionAnswering.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
    def test_from_pretrained_identifier(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
"""simple docstring"""
lowerCamelCase : Tuple = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase : int = copy.deepcopy(model.config )
lowerCamelCase : Optional[Any] = ["FunnelBaseModel"]
lowerCamelCase : List[Any] = TFAutoModel.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
lowerCamelCase : int = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
    def test_new_model_registration(self):
"""simple docstring"""
try:
AutoConfig.register("new-model" , __lowerCamelCase )
lowerCamelCase : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase : List[str] = BertModelTester(self ).get_config()
lowerCamelCase : Optional[Any] = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase : Tuple = auto_class.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
lowerCamelCase : Union[str, Any] = auto_class.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ):
            model = TFAutoModel.from_pretrained("bert-base" )
    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa" )
    def test_model_file_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
    def test_model_from_pt_suggestion(self):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model" ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
    def test_cached_model_has_minimum_calls_to_head(self):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
lowerCamelCase : int = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase : int = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
lowerCamelCase : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 340 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    # KE = 0.5 * m * |v|^2; abs() makes the result direction-independent
    return 0.5 * mass * abs(velocity) * abs(velocity)
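# Example: kinetic_energy(10, 5) == 0.5 * 10 * 5 * 5 == 125.0; the sign of the
# velocity is irrelevant because only |v| enters the formula.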
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 16 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i) )
            upper[i][j] = table[i][j] - total
    return lower, upper
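# A minimal sketch of a sanity check (hypothetical helper, not part of the
# original module):
def _example() -> None:
    lower, upper = lower_upper_decomposition(np.array([[2.0, 1.0], [4.0, 3.0]]))
    # Doolittle scheme with unit diagonal on L: L = [[1, 0], [2, 1]] and
    # U = [[2, 1], [0, 1]]; their product reconstructs the input matrix.
    assert np.allclose(lower @ upper, np.array([[2.0, 1.0], [4.0, 3.0]]))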
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A : Dict = logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( BenchmarkArguments ):
'''simple docstring'''
lowerCamelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self : List[Any] , **kwargs : Any ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}" )
        self.tpu_name = kwargs.pop("tpu_name" , self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" , self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" , self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**kwargs )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Name of TPU"} , )
lowerCamelCase__ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _snake_case ( self : Optional[int] ):
requires_backends(self , ["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
return tpu
@cached_property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" )
return strategy
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def _snake_case ( self : Optional[Any] ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def _snake_case ( self : List[str] ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _snake_case ( self : Dict ):
return self.n_gpu > 0 | 16 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
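# Hedged usage sketch (added for illustration; the repo id and the returned key
# are assumptions about a typical hosted checkpoint, not guaranteed here):
#
#     >>> config = get_feature_extractor_config("facebook/wav2vec2-base-960h")  # doctest: +SKIP
#     >>> config.get("feature_extractor_type")  # doctest: +SKIP
#     'Wav2Vec2FeatureExtractor'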
class __UpperCamelCase :
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('feature_extractor_type' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , 'feature_extractor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register( config_class , feature_extractor_class ):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
| 259 |
from collections.abc import Callable
import numpy as np
def __a ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
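# Hedged usage sketch (added for illustration, not part of the original snippet):
# integrate y' = y on [0, 1] with y(0) = 1 and h = 0.25; Heun's method is second
# order, so the last sample lands near e (about 2.695 here vs e ~= 2.71828).
#
#     >>> ys = __a(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)
#     >>> round(float(ys[-1]), 3)
#     2.695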
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 |
def __a ( input_num : int ):
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
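# Hedged usage sketch (added for illustration, not part of the original snippet):
# the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum back to 28, so 28
# is a perfect number.
#
#     >>> __a(28)
#     28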
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest ( FlaxModelTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 579 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile ( script ):
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self : List[str] , username : str ):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json( self : Tuple ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username( self : Optional[int] ):
        return self.user_data["username"]
    @property
    def fullname( self : List[Any] ):
        return self.user_data["full_name"]
    @property
    def biography( self : List[str] ):
        return self.user_data["biography"]
    @property
    def email( self : Tuple ):
        return self.user_data["business_email"]
    @property
    def website( self : Optional[Any] ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self : int ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self : List[str] ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self : List[Any] ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self : Any ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self : Optional[int] ):
        return self.user_data["is_verified"]
    @property
    def is_private( self : Dict ):
        return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }') | 16 | 0 |
"""simple docstring"""
from __future__ import annotations
def pigeon_sort ( array: list[int] ) -> list[int]:
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
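# Hedged usage sketch (added for illustration, not part of the original snippet):
# pigeonhole sort runs in O(n + value range) time and suits dense integer keys.
#
#     >>> pigeon_sort([9, 1, 4, 1, 7])
#     [1, 1, 4, 7, 9]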
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 607 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Any = logging.get_logger(__name__)
__A : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Optional[Any] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
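# Hedged note (added for illustration): every byte 0-255 gets a printable,
# reversible unicode stand-in so BPE can run on text. For example the space
# byte 0x20 is excluded from the printable ranges above, so it maps to
# chr(256 + 32) == 'Ġ', the familiar GPT-2 style space marker.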
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
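# Hedged usage sketch (added for illustration, not part of the original snippet):
#
#     >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
#     [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]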
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : str ):
return len(self.encoder )
def _snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Tuple , __lowerCamelCase : Dict ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : int , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 16 | 0 |
import qiskit
def quantum_entanglement ( qubits : int = 2 ):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    return job.result().get_counts(circuit )
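# Hedged note (added for illustration): this GHZ-style circuit entangles all
# qubits, so the measured counts concentrate on the all-zeros and all-ones
# bitstrings, e.g. roughly {'000': ~500, '111': ~500} for 3 qubits over
# 1000 shots.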
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
| 175 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links ( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
def get_artifacts_links ( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
def download_artifact ( artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"{artifact_name}.zip" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact ( artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
            F"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors ( artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error ( logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model ( test ):
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        model = test.split("/" )[2]
    else:
        model = None
    return model
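# Hedged example (added for illustration):
# get_model("tests/models/bert/test_modeling_bert.py::BertModelTest::test_model")
# returns "bert", while tests outside tests/models/ map to None.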
def reduce_by_model ( logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table ( reduced_by_error ):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F"| {count} | {error[:100]} | |"
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model ( reduced_by_model ):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error , _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = F"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A : int = get_job_links(args.workflow_run_id, token=args.token)
__A : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A : Union[str, Any] = k.find(' / ')
__A : Optional[int] = k[index + len(' / ') :]
__A : Optional[int] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A : Optional[Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A : str = reduce_by_error(errors)
__A : int = reduce_by_model(errors)
__A : Any = make_github_table(reduced_by_error)
__A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 16 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase_ ( DiffusionPipeline ):
    """simple docstring"""
    _optional_components = ["vqvae"]
    def __init__( self : int, vqvae : AutoencoderKL, unet : UNetaDConditionModel, mel : Mel, scheduler : Union[DDIMScheduler, DDPMScheduler], ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae )
    def get_default_steps( self : int ) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self : Dict, UpperCamelCase__ : int = 1, UpperCamelCase__ : str = None, UpperCamelCase__ : np.ndarray = None, UpperCamelCase__ : int = 0, UpperCamelCase__ : int = 0, UpperCamelCase__ : int = None, UpperCamelCase__ : torch.Generator = None, UpperCamelCase__ : float = 0, UpperCamelCase__ : float = 0, UpperCamelCase__ : torch.Generator = None, UpperCamelCase__ : float = 0, UpperCamelCase__ : torch.Tensor = None, UpperCamelCase__ : torch.Tensor = None, UpperCamelCase__ : Optional[int]=True, ) -> Any:
_A = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_A = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_A = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_A = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
), generator=__lowerCamelCase, device=self.device, )
_A = noise
_A = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase, __lowerCamelCase )
_A = self.mel.audio_slice_to_image(__lowerCamelCase )
_A = np.frombuffer(input_image.tobytes(), dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_A = (input_image / 2_55) * 2 - 1
_A = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_A = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase, 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_A = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_A = self.scheduler.add_noise(__lowerCamelCase, __lowerCamelCase, self.scheduler.timesteps[start_step - 1] )
_A = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_A = int(mask_start_secs * pixels_per_second )
_A = int(mask_end_secs * pixels_per_second )
_A = self.scheduler.add_noise(__lowerCamelCase, __lowerCamelCase, torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNetaDConditionModel ):
_A = self.unet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )['sample']
else:
_A = self.unet(__lowerCamelCase, __lowerCamelCase )['sample']
            if isinstance(self.scheduler, DDIMScheduler ):
_A = self.scheduler.step(
model_output=__lowerCamelCase, timestep=__lowerCamelCase, sample=__lowerCamelCase, eta=__lowerCamelCase, generator=__lowerCamelCase, )['prev_sample']
else:
_A = self.scheduler.step(
model_output=__lowerCamelCase, timestep=__lowerCamelCase, sample=__lowerCamelCase, generator=__lowerCamelCase, )['prev_sample']
if mask is not None:
if mask_start > 0:
_A = mask[:, step, :, :mask_start]
if mask_end > 0:
_A = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_A = 1 / self.vqvae.config.scaling_factor * images
_A = self.vqvae.decode(__lowerCamelCase )['sample']
_A = (images / 2 + 0.5).clamp(0, 1 )
_A = images.cpu().permute(0, 2, 3, 1 ).numpy()
_A = (images * 2_55).round().astype('uint8' )
_A = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowerCamelCase, mode='RGB' ).convert('L' ) for _ in images) )
_A = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ), **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def __UpperCAmelCase ( self : int, UpperCamelCase__ : List[Image.Image], UpperCamelCase__ : int = 50 ) -> Any:
        assert isinstance(self.scheduler, DDIMScheduler )
self.scheduler.set_timesteps(__lowerCamelCase )
_A = np.array(
[np.frombuffer(image.tobytes(), dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_A = (sample / 2_55) * 2 - 1
_A = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,) ) ):
_A = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_A = self.scheduler.alphas_cumprod[t]
_A = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_A = 1 - alpha_prod_t
_A = self.unet(__lowerCamelCase, __lowerCamelCase )['sample']
_A = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_A = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_A = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp( x0 : torch.Tensor, x1 : torch.Tensor, alpha : float ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0 ), torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
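    # Hedged note (added for illustration): slerp interpolates along the great
    # circle between the two flattened tensors; alpha=0 recovers the first
    # input's direction and alpha=1 the second's, which preserves the norm
    # statistics of Gaussian latents better than linear interpolation.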
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "gpt_neox"
    def __init__( self : Optional[int] , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation( self : Union[str, Any] ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCAmelCase : str = logging.get_logger(__name__)
class FlavaFeatureExtractor ( FlavaImageProcessor ):
    '''simple docstring'''
    def __init__( self : List[str] , *args : Optional[Any] , **kwargs : List[Any] ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 246 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class _a ( metaclass=__snake_case ):
"""simple docstring"""
A_ = ["""sentencepiece"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(self , ['sentencepiece'] )
| 23 |
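# The classes above use transformers' dummy-object pattern: when an optional
# backend (sentencepiece here) is absent, placeholder classes are exported
# that raise a helpful ImportError on use. A hedged sketch of the two helpers
# (illustrative, not the exact transformers internals):
import importlib.util


class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, ["sentencepiece"])


def requires_backends(obj, backends):
    # Works for both classes and instances, as in the dummy classes above.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")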
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def __a ( A__ : Dict , A__ : Dict , A__ : Any ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
return new_state_dict
def __a ( A__ : Optional[Any] , A__ : Tuple=False ):
SCREAMING_SNAKE_CASE = ""
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:256]
SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-256:]
def __a ( ):
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def __a ( A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
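# A small self-contained check of the in_proj slicing performed by
# read_in_q_k_v above: PyTorch's nn.MultiheadAttention fuses the query, key
# and value projections into one (3*d, d) weight, and the conversion splits
# it into three (d, d) blocks (d = 256 is the hidden size assumed by the script).
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)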
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (pass the unknown
    quantity as 0), compute the missing one from Z^2 = R^2 + X^2.

    >>> electrical_impedance(3, 4, 0)
    {'impedance': 5.0}
    >>> electrical_impedance(0, 4, 5)
    {'resistance': 3.0}
    >>> electrical_impedance(3, 0, 5)
    {'reactance': 4.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 529 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first backtracking: extend the partial permutation with each
    # unused element, recurse, then undo the choice.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 16 | 0 |
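# For comparison, the standard library yields the same n! orderings without
# explicit backtracking (a usage sketch, not part of the original module):
from itertools import permutations

for p in permutations([3, 1, 2, 4]):
    print(list(p))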
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env" )
def _snake_case ( self , __A=1 ):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _snake_case ( self , __A ):
"""simple docstring"""
TrainingJobAnalytics(__lowerCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowerCamelCase : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase )
| 340 |
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    print(f'{solution() = }') | 16 | 0 |
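# For reference, the same total can be computed in O(1) with inclusion-exclusion
# (a sketch, independent of the loop above): the sum of multiples of k below n
# is k * m * (m + 1) / 2 with m = (n - 1) // k.
def solution_closed_form(n: int = 1000) -> int:
    def multiples_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


assert solution_closed_form(1000) == 233168  # well-known Project Euler #1 answer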
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_lowercase : List[str] = k.replace(A__ , A__ )
if k.startswith('encoder' ):
_lowercase : int = k.replace('.attn' , '.self_attn' )
_lowercase : int = k.replace('norm1' , 'self_attn_layer_norm' )
_lowercase : int = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_lowercase : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
_lowercase : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
_lowercase : str = k.replace('norm3' , 'final_layer_norm' )
return k
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_lowercase : Tuple = sd.pop(A__ )
_lowercase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_lowercase : Dict = v
SCREAMING_SNAKE_CASE : Tuple = ['START']
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[int] = torch.load(A__ , map_location='cpu' )
_lowercase : int = model['model']
_lowercase : List[str] = BlenderbotConfig.from_json_file(A__ )
_lowercase : Optional[Any] = BlenderbotForConditionalGeneration(A__ )
_lowercase : str = m.model.state_dict().keys()
_lowercase : int = []
_lowercase : List[str] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_lowercase : str = rename_state_dict_key(A__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_lowercase : Union[str, Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A__ )
m.model.load_state_dict(A__ , strict=A__ )
m.half()
m.save_pretrained(A__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 89 |
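# The PATTERNS table above drives plain substring substitution over state-dict
# keys. A hedged mini-demo of the same renaming idea on an invented key (toy
# patterns only, not the full table):
toy_patterns = [["attention", "attn"], ["out_lin", "out_proj"]]


def rename_toy_key(key):
    for parlai_name, hf_name in toy_patterns:
        key = key.replace(parlai_name, hf_name)
    return key


assert rename_toy_key("encoder.attention.out_lin.weight") == "encoder.attn.out_proj.weight"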
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Dict = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( __snake_case ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowerCamelCase ,scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = 50 ,_A = None ,_A = "pil" ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = self.unet.config.sample_size
_lowerCAmelCase : str = (batch_size, 3, img_size, img_size)
_lowerCAmelCase : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase : str = randn_tensor(__lowerCamelCase ,generator=__lowerCamelCase ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowerCAmelCase : Optional[Any] = self.scheduler.schedule[t]
_lowerCAmelCase : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.scheduler.add_noise_to_input(__lowerCamelCase ,__lowerCamelCase ,generator=__lowerCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[str] = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase : List[Any] = self.scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : Optional[int] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
_lowerCAmelCase : Optional[int] = self.scheduler.step_correct(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,step_output.prev_sample ,step_output['derivative'] ,)
_lowerCAmelCase : Tuple = step_output.prev_sample
_lowerCAmelCase : List[Any] = (sample / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Dict = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
| 259 |
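# A hedged usage sketch for the pipeline above (the checkpoint id is
# illustrative; any repo pairing a UNet2DModel with a KarrasVeScheduler
# should work):
from diffusers import KarrasVePipeline

pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("karras_ve_sample.png")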
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase ) | 16 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __snake_case ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase_ = None
class __snake_case ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase_ = PandasConfig
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCAmelCase_ : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase ,(str, list, tuple) ):
lowerCAmelCase_ : List[Any] = data_files
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase_ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ : Optional[int] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )]
lowerCAmelCase_ : str = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase_ : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ : Optional[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase ,gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : pa.Table ) -> str:
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase_ : Optional[Any] = table_cast(__lowerCamelCase ,self.config.features.arrow_schema )
return pa_table
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> str:
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
with open(__lowerCamelCase ,"rb" ) as f:
lowerCAmelCase_ : Any = pa.Table.from_pandas(pd.read_pickle(__lowerCamelCase ) )
yield i, self._cast_table(__lowerCamelCase )
| 659 |
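# Usage sketch for the builder above: datasets ships it as the packaged
# "pandas" loader, which reads pickled DataFrames, one table per file (the
# file name below is illustrative).
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(ds[0])  # expected: {'text': 'a', 'label': 0}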
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 | 0 |
import logging
import os
from .state import PartialState
class lowerCamelCase ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def A__ ( lowerCAmelCase ):
UpperCAmelCase_ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A__ ( self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
UpperCAmelCase_ = kwargs.pop("main_process_only" , __lowerCamelCase )
UpperCAmelCase_ = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
UpperCAmelCase_ , UpperCAmelCase_ = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
UpperCAmelCase_ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCAmelCase_ , UpperCAmelCase_ = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> int:
if log_level is None:
UpperCAmelCase_ = os.environ.get("ACCELERATE_LOG_LEVEL" , A__ )
UpperCAmelCase_ = logging.getLogger(A__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(A__ , {} )
| 579 |
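# A hedged usage sketch for the adapter above; the masked `snake_case__`
# helper corresponds to accelerate's public get_logger entry point, and the
# shared state must be initialized (e.g. via Accelerator()) before logging.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("emitted once, on the main process only")
logger.info("emitted by every process, in rank order", main_process_only=False, in_order=True)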
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = v.to_dict()
return d | 16 | 0 |
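# A hedged usage sketch for the arguments dataclass above (values are
# illustrative; field names follow the definitions in this file):
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,  # compute generative metrics (ROUGE, BLEU) via generate()
    generation_max_length=128,
    generation_num_beams=4,
    sortish_sampler=True,
)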
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
lowercase : str = """hf-internal-testing/tiny-random-t5"""
lowercase : Any = AutoTokenizer.from_pretrained(__lowerCamelCase )
lowercase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
lowercase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" )
lowercase : Any = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase : List[str] = model.generate(**__lowerCamelCase )
lowercase : Optional[int] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
lowercase : int = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase : List[Any] = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = """hf-internal-testing/tiny-random-t5"""
lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
lowercase : Any = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
lowercase : Dict = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase )
| 607 |
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution()) | 16 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ = {'facebook/blenderbot_small-90M': 512}
def a__ ( _UpperCamelCase : Dict ):
__lowerCamelCase = set()
__lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase = char
__lowerCamelCase = set(A__ )
return pairs
class __lowerCAmelCase ( __snake_case ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="__start__" , __UpperCAmelCase="__end__" , __UpperCAmelCase="__unk__" , __UpperCAmelCase="__null__" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase )
with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
__lowerCamelCase = json.load(__lowerCamelCase )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
__lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
__lowerCamelCase = [tuple(merge.split() ) for merge in merges]
__lowerCamelCase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__lowerCamelCase = {}
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCamelCase = re.sub('''([.,!?()])''' , r''' \1''' , __lowerCamelCase )
__lowerCamelCase = re.sub('''(\')''' , r''' \1 ''' , __lowerCamelCase )
__lowerCamelCase = re.sub(r'''\s{2,}''' , ''' ''' , __lowerCamelCase )
if "\n" in token:
__lowerCamelCase = token.replace('''\n''' , ''' __newln__''' )
__lowerCamelCase = token.split(''' ''' )
__lowerCamelCase = []
for token in tokens:
if not len(__lowerCamelCase ):
continue
__lowerCamelCase = token.lower()
__lowerCamelCase = tuple(__lowerCamelCase )
__lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__lowerCamelCase = get_pairs(__lowerCamelCase )
if not pairs:
words.append(__lowerCamelCase )
continue
while True:
__lowerCamelCase = min(__lowerCamelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase ,__lowerCamelCase = bigram
__lowerCamelCase = []
__lowerCamelCase = 0
while i < len(__lowerCamelCase ):
try:
__lowerCamelCase = word.index(__lowerCamelCase , __lowerCamelCase )
new_word.extend(word[i:j] )
__lowerCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase = tuple(__lowerCamelCase )
__lowerCamelCase = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__lowerCamelCase = get_pairs(__lowerCamelCase )
__lowerCamelCase = '''@@ '''.join(__lowerCamelCase )
__lowerCamelCase = word[:-4]
__lowerCamelCase = word
words.append(__lowerCamelCase )
return " ".join(__lowerCamelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = token.lower()
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '''\n''' )
__lowerCamelCase = 0
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__lowerCamelCase = token_index
writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
| 175 |
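# The tokenizer's merge loop above is standard greedy BPE: repeatedly merge
# the lowest-ranked adjacent pair. A tiny trace of one step with invented ranks:
def get_pairs_demo(word):
    return {(a, b) for a, b in zip(word, word[1:])}


word = ("l", "o", "w", "e", "r</w>")
bpe_ranks_demo = {("l", "o"): 0, ("o", "w"): 1}
best = min(get_pairs_demo(word), key=lambda pair: bpe_ranks_demo.get(pair, float("inf")))
assert best == ("l", "o")  # the lowest-ranked pair is merged first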
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir) | 16 | 0 |
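# A hedged example of a test consuming the fixture above (it assumes
# datasets.load_dataset accepts the local script directory created there):
def test_dummy_dataset_loads(dataset_loading_script_dir):
    from datasets import load_dataset

    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names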