code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 87 to 55.2k | int64 0 to 349 | stringlengths 135 to 49.1k | int64 0 to 349 | int64 0 or 1
"""Tests for the fill-mask pipeline."""
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # Clean up as much GPU memory occupied by PyTorch as possible.
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # Convert the model to fp16.
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We don't actually care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
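
# Minimal usage sketch (added for illustration, not part of the test suite):
# the same fill-mask pipeline outside of a test, on the public
# distilroberta-base checkpoint exercised by the slow tests above.
def _fill_mask_demo():
    unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
    return unmasker("The largest city in France is <mask>.")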
"""Extract a DistilBERT-style student checkpoint from a trained BertForMaskedLM teacher."""
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # NOTE: the student-side key names below follow the DistilBertForMaskedLM parameter
    # naming; the obfuscated source had dropped the left-hand sides of these assignments.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
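
# Minimal sketch (added for illustration, not part of the original script) of
# loading the extracted weights into a DistilBERT student. The 6-layer config is
# an assumption matching the six teacher layers selected above; `strict=False`
# tolerates any keys the extraction did not cover.
def _load_student_sketch(checkpoint_path: str):
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    student.load_state_dict(torch.load(checkpoint_path), strict=False)
    return student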
"""ROUGE metric, wrapping the Google Research `rouge_score` reimplementation."""
import absl  # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import six  # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring

import datasets


_CITATION = "\\n@inproceedings{lin-2004-rouge,\n    title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n    author = \"Lin, Chin-Yew\",\n    booktitle = \"Text Summarization Branches Out\",\n    month = jul,\n    year = \"2004\",\n    address = \"Barcelona, Spain\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W04-1013\",\n    pages = \"74--81\",\n}\n"

_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"

_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n        `\"rougeL\"`: Longest common subsequence based scoring.\n        `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric('rouge')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n    >>> print(results[\"rouge1\"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results[\"rouge1\"].mid.fmeasure)\n    1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
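
# Minimal usage sketch (added for illustration, not part of the metric itself):
# computing per-example scores by disabling the bootstrap aggregator, a mode
# the docstring example above does not show. Assumes the `datasets` and
# `rouge_score` packages are installed.
# >>> rouge = datasets.load_metric("rouge")
# >>> results = rouge.compute(
# ...     predictions=["hello there"], references=["hello world"], use_aggregator=False
# ... )
# >>> results["rouge1"]  # a list with one Score(precision, recall, fmeasure) per example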
"""Tests for the PNDM scheduler."""
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
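
# Minimal standalone sketch (added for illustration, not part of the test
# suite) of the two-phase PRK/PLMS loop that the `full_loop` helper above
# exercises, with random noise standing in for a trained UNet's output.
def _pndm_demo():
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = torch.randn_like(sample)  # stands in for model(sample, t)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = torch.randn_like(sample)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample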
"""Integration tests for the k-diffusion Stable Diffusion pipeline."""
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""ViViT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
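
# Minimal sketch (added for illustration, not part of this module): the default
# config describes 32-frame 224x224 clips cut into 2x16x16 tubelets, i.e.
# (32 / 2) * (224 / 16)**2 = 3136 patch tokens per video.
def _vivit_token_count(config: VivitConfig) -> int:
    temporal = config.num_frames // config.tubelet_size[0]
    spatial = (config.image_size // config.tubelet_size[1]) * (config.image_size // config.tubelet_size[2])
    return temporal * spatial  # 3136 for the defaults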
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
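
# Worked example (added for illustration): a 100 VA load at power factor 0.8
# splits into 80 W real power and 60 var reactive power, since
# sqrt(1 - 0.8**2) == 0.6 (the classic 3-4-5 power triangle).
def _power_triangle_demo():
    assert round(real_power(100, 0.8), 10) == 80.0
    assert round(reactive_power(100, 0.8), 10) == 60.0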
"""Feature extractor class for DeiT, deprecated in favor of DeiTImageProcessor."""
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import torch
from torch import nn
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a=1 , _a=False ):
super().__init__()
__a = n_token
__a = d_embed
__a = d_proj
__a = cutoffs + [n_token]
__a = [0] + self.cutoffs
__a = div_val
__a = self.cutoffs[0]
__a = len(self.cutoffs ) - 1
__a = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__a = nn.Parameter(torch.zeros(self.n_clusters ) )
__a = nn.ModuleList()
__a = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
else:
self.out_projs.append(_a )
self.out_layers.append(nn.Linear(_a , _a ) )
else:
for i in range(len(self.cutoffs ) ):
__a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
self.out_layers.append(nn.Linear(_a , r_idx - l_idx ) )
__a = keep_order
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
if proj is None:
__a = nn.functional.linear(_a , _a , bias=_a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__a = nn.functional.linear(_a , proj.t().contiguous() )
__a = nn.functional.linear(_a , _a , bias=_a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __UpperCAmelCase ( self , _a , _a=None , _a=False ):
if labels is not None:
# Shift so that tokens < n predict n
__a = hidden[..., :-1, :].contiguous()
__a = labels[..., 1:].contiguous()
__a = hidden.view(-1 , hidden.size(-1 ) )
__a = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__a = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__a = labels != -100
__a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
__a = (
-nn.functional.log_softmax(_a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__a = nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
__a , __a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a = self.out_layers[0].weight[l_idx:r_idx]
__a = self.out_layers[0].bias[l_idx:r_idx]
else:
__a = self.out_layers[i].weight
__a = self.out_layers[i].bias
if i == 0:
__a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
__a , __a , __a = weights[0], biases[0], self.out_projs[0]
__a = self._compute_logit(_a , _a , _a , _a )
__a = nn.functional.log_softmax(_a , dim=1 )
if labels is None:
__a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
__a = 0
__a = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
__a , __a = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__a = (labels >= l_idx) & (labels < r_idx)
__a = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__a = labels.index_select(0 , _a ) - l_idx
__a = head_logprob.index_select(0 , _a )
__a = hidden.index_select(0 , _a )
else:
__a = hidden
if i == 0:
if labels is not None:
__a = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__a = head_logprob[:, : self.cutoffs[0]]
else:
__a , __a , __a = weights[i], biases[i], self.out_projs[i]
__a = self._compute_logit(_a , _a , _a , _a )
__a = nn.functional.log_softmax(_a , dim=1 )
__a = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__a = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __UpperCAmelCase ( self , _a ):
if self.n_clusters == 0:
__a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
__a , __a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a = self.out_layers[0].weight[l_idx:r_idx]
__a = self.out_layers[0].bias[l_idx:r_idx]
else:
__a = self.out_layers[i].weight
__a = self.out_layers[i].bias
if i == 0:
__a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
__a , __a , __a = weights[0], biases[0], self.out_projs[0]
__a = self._compute_logit(_a , _a , _a , _a )
__a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__a = nn.functional.log_softmax(_a , dim=1 )
__a = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
__a , __a = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__a = head_logprob[:, : self.cutoffs[0]]
else:
__a , __a , __a = weights[i], biases[i], self.out_projs[i]
__a = self._compute_logit(_a , _a , _a , _a )
__a = nn.functional.log_softmax(_a , dim=1 )
__a = head_logprob[:, -i] + tail_logprob_i
__a = logprob_i
return out
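
# Minimal usage sketch (added for illustration, not part of the module): a
# 1000-token vocabulary with cutoffs [100, 500] gives a 100-token head plus two
# tail clusters; forward() returns per-token negative log-likelihoods when
# labels are provided. The init loop is needed because the FloatTensor
# projections above start uninitialized.
def _adaptive_softmax_demo():
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)
    for p in crit.parameters():
        nn.init.normal_(p, std=0.02)
    hidden = torch.randn(4, 8, 32)  # [bsz, seq_len, d_proj]
    labels = torch.randint(0, 1000, (4, 8))
    nll = crit(hidden, labels)  # shape [4 * (8 - 1)] after the next-token shift
    return nll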
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
UpperCAmelCase__ = jieba
UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if self.remove_space:
UpperCAmelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ = inputs
UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase )
UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
UpperCAmelCase__ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase )
UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
UpperCAmelCase__ = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ = cur_pieces[1:]
else:
UpperCAmelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        """Decode ids, then map the CPM whitespace markers back: \\u2582 -> space, \\u2583 -> newline."""
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
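# Hedged sketch (not part of the tokenizer above): how the XLNet-style special-token
# layout composes. The ids 4 (sep) and 3 (cls) are made up for illustration; real
# values depend on the loaded SentencePiece vocab.
def _sketch_xlnet_layout(ids_a, ids_b=None, sep_id=4, cls_id=3):
    # Mirrors build_inputs_with_special_tokens: A [SEP] (B [SEP]) [CLS]
    if ids_b is None:
        return ids_a + [sep_id] + [cls_id]
    return ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]

assert _sketch_xlnet_layout([10, 11]) == [10, 11, 4, 3]
assert _sketch_xlnet_layout([10], [20]) == [10, 4, 20, 4, 3]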
| 346 | 0 |
"""simple docstring"""
def solution( pence : int = 2_00 ):
    '''Count the ways to make ``pence`` from standard UK coins (Project Euler 31).'''
    coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
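    # Hedged extra check (not in the original): with coins {1, 2, 5} alone there are
    # 4 ways to make 5 pence (1+1+1+1+1, 1+1+1+2, 1+2+2, 5); the same DP confirms it.
    _ways = [1] + [0] * 5
    for _coin in [1, 2, 5]:
        for _i in range(_coin, 6):
            _ways[_i] += _ways[_i - _coin]
    assert _ways[5] == 4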
| 46 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ = logging.getLogger(__name__)
def parse_args():
    '''Parse the command-line arguments for the TFRecord preparation script.'''
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=1000 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def tokenize_function( tokenizer ):
    '''Return a closure that tokenizes the "text" column of a batch of examples.'''
    def fn(examples ):
        return tokenizer(examples["""text"""] )
    return fn
def get_serialized_examples( tokenized_data ):
    '''Serialize tokenized examples into ``tf.train.Example`` byte strings.'''
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main( args ):
    '''Tokenize, chunk and serialize the dataset into TFRecord shards.'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
        print(F'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
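# Hedged companion sketch (not part of the original script): reading a shard back
# with ``tf.data``. The feature spec must mirror the writer above, and the fixed
# sequence length (512) is this script's default ``--max_length``.
def _sketch_load_tfrecords(file_pattern, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }
    def decode(serialized):
        # Inverse of get_serialized_examples: bytes -> dict of int64 tensors.
        return tf.io.parse_single_example(serialized, feature_spec)
    files = tf.data.Dataset.list_files(file_pattern)
    return tf.data.TFRecordDataset(files).map(decode)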
| 346 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 5_1_2,
"google/realm-cc-news-pretrained-encoder": 5_1_2,
"google/realm-cc-news-pretrained-scorer": 5_1_2,
"google/realm-cc-news-pretrained-openqa": 5_1_2,
"google/realm-orqa-nq-openqa": 5_1_2,
"google/realm-orqa-nq-reader": 5_1_2,
"google/realm-orqa-wq-openqa": 5_1_2,
"google/realm-orqa-wq-reader": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        '''Encode a batch of candidate lists; every candidate is padded to ``max_length``.'''
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''Standard BERT layout: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''Segment ids: 0 for ``[CLS] A [SEP]`` and 1 for ``B [SEP]``.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''Save the fast tokenizer's vocabulary files into ``save_directory``.'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
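# Hedged usage sketch (not part of the original file) for batch_encode_candidates
# above. The checkpoint name comes from the pretrained map in this file; the
# candidate texts are invented, and running this downloads the tokenizer.
from transformers import RealmTokenizerFast as _HubRealmTokenizerFast

_tok = _HubRealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
_batch = _tok.batch_encode_candidates(
    [["candidate one", "candidate two"]], max_length=10, return_tensors="pt"
)
print(_batch["input_ids"].shape)  # (1, 2, 10): batch x num_candidates x padded length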
| 47 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute( self , input_texts , model_id , batch_size : int = 16 , add_start_token : bool = True , device=None ):
        """Compute per-text and mean perplexity for a causal LM."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = """cuda"""
        else:
            device = """cuda""" if torch.cuda.is_available() else """cpu"""
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="""pt""" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["""input_ids"""]
        attn_masks = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="""none""" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            # CrossEntropyLoss is a natural-log NLL, so perplexity is exp of the masked mean loss.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 346 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class BlipaVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_2_vision_model"""
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_2_qformer"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip-2"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
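# Hedged usage sketch (not part of the original file): composing a BLIP-2 config
# from default sub-configs via the classmethod above. OPTConfig is assumed to be
# importable from transformers (BLIP-2's default text backbone is OPT).
from transformers import OPTConfig

_vision_cfg = BlipaVisionConfig()
_qformer_cfg = BlipaQFormerConfig()
_text_cfg = OPTConfig()
_blip2_cfg = BlipaConfig.from_vision_qformer_text_configs(_vision_cfg, _qformer_cfg, _text_cfg)
print(_blip2_cfg.num_query_tokens)  # 32 by default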
| 48 |
'''simple docstring'''
def solution( limit : int = 1000000 ):
    '''Project Euler 72: count reduced proper fractions with denominator <= limit, i.e. sum of phi(d) for d = 2..limit.'''
    phi = [i - 1 for i in range(limit + 1 )]  # correct for primes; refined by the sieve below
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime, so update the totient of its multiples
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
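    # Hedged sanity check (not in the original): Project Euler 72 lists 21 reduced
    # proper fractions for d <= 8, and the sieve above agrees.
    assert solution(8) == 21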
| 346 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
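# Hedged, generic sketch (not this file's _LazyModule) of the lazy-module idea
# used above: defer each submodule import until the first attribute access.
import importlib
import types

class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # {submodule name: [exported symbol, ...]}, mirroring _import_structure
        self._symbol_to_module = {s: m for m, syms in import_structure.items() for s in syms}
    def __getattr__(self, attr):
        # Only reached when normal lookup fails, i.e. on first access.
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later accesses skip the import machinery
        return value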
| 49 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
    '''simple docstring'''
def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ):
"""simple docstring"""
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """Split kwargs into preprocess/forward/postprocess parameter dicts."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        """Load the image (path, URL or PIL object) and run the image processor."""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """Turn logits into the top-k (score, label) pairs."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 346 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
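# Hedged usage sketch (not part of the file): instantiating the config above with
# its defaults and reading the mapped attributes.
_config = DeformableDetrConfig()
print(_config.d_model, _config.num_attention_heads)  # 256 8, via attribute_map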
| 50 |
'''simple docstring'''
from math import factorial
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 20 ):
'''simple docstring'''
UpperCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
UpperCAmelCase__ = n // 2
return int(factorial(SCREAMING_SNAKE_CASE__ ) / (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 346 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
snake_case_ : int = "bert-base-cased"
snake_case_ : List[Any] = "google/pegasus-xsum"
snake_case_ : Optional[Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
snake_case_ : Any = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
snake_case_ : List[str] = "patrickvonplaten/t5-tiny-random"
snake_case_ : Tuple = "sshleifer/bart-tiny-random"
snake_case_ : List[str] = "sshleifer/tiny-mbart"
snake_case_ : List[Any] = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path , articles: list ):
    """Write the given articles to ``path``, one per line."""
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
def make_test_data_dir(tmp_dir: str ):
    """Populate ``tmp_dir`` with tiny train/val/test source and target files."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
class __snake_case ( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation( self , tok_name ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang , tgt_lang = '''ro_RO''', '''de_DE'''  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
            assert isinstance(batch , dict )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
            batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def lowerCamelCase ( self : Optional[int] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES)
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES)
UpperCAmelCase_ = 4
UpperCAmelCase_ = LegacySeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=20 , max_target_length=_snake_case , )
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
UpperCAmelCase_ = tmp_dir.joinpath('''train.source''').open().readlines()
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_snake_case , _snake_case , 128 , _snake_case)
UpperCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case) < len(_snake_case)
assert len(_snake_case) == 1
assert len(packed_examples[0]) == sum(len(_snake_case) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=64)
UpperCAmelCase_ = 64
UpperCAmelCase_ = ds.make_dynamic_sampler(_snake_case , required_batch_size_multiple=_snake_case)
UpperCAmelCase_ = [len(_snake_case) for x in batch_sampler]
assert len(set(_snake_case)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_snake_case) == len(_snake_case) # no dropped or added examples
UpperCAmelCase_ = DataLoader(_snake_case , batch_sampler=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for batch in data_loader:
UpperCAmelCase_ = batch['''input_ids'''].shape
UpperCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(_snake_case)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_snake_case)
assert num_src_per_batch[0] == max(_snake_case)
if failures:
raise AssertionError(F"""too many tokens in {len(_snake_case)} batches""")
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=512)
UpperCAmelCase_ = 2
UpperCAmelCase_ = ds.make_sortish_sampler(_snake_case , shuffle=_snake_case)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=_snake_case)
UpperCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_snake_case : List[Any] , _snake_case : Tuple="input_ids"):
return [batch[k].eq(_snake_case).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_snake_case , k='''labels''')) < sum(count_pad_tokens(_snake_case , k='''labels'''))
assert sum(count_pad_tokens(_snake_case)) < sum(count_pad_tokens(_snake_case))
assert len(_snake_case) == len(_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict=1000 , _snake_case : str=128):
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , _snake_case):
UpperCAmelCase_ = '''examples/seq2seq/wmt_en_ro'''
UpperCAmelCase_ = max_len * 2 * 64
if not Path(_snake_case).joinpath('''train.len''').exists():
save_len_file(_snake_case , _snake_case)
else:
UpperCAmelCase_ = '''examples/seq2seq/test_data/wmt_en_ro'''
UpperCAmelCase_ = max_len * 4
save_len_file(_snake_case , _snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=_snake_case , max_target_length=_snake_case , n_obs=_snake_case , )
return ds, max_tokens, tokenizer
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset()
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=_snake_case))
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=_snake_case))
assert idsa.intersection(_snake_case) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase ( self : List[str] , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case)
if tok_name == MBART_TINY:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_snake_case) == 1 if tok_name == BART_TINY else len(_snake_case) == 0
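# Hedged sketch (not from the test file) of the idea the sortish-sampler tests
# exercise: grouping examples of similar length before batching cuts padding.
def _sketch_sortish_batches(lengths, batch_size):
    order = sorted(range(len(lengths)), key=lambda i: lengths[i])
    return [order[i : i + batch_size] for i in range(0, len(order), batch_size)]

# Batches drawn from length-sorted order waste far fewer pad tokens than batches
# drawn from arbitrary order when lengths vary a lot.
print(_sketch_sortish_batches([5, 100, 7, 98, 6, 99], batch_size=2))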
| 51 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + """\n""" )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case( self ):
"""simple docstring"""
pass
    def test_add_special_tokens( self ):
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ):
        """simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(""" """ , """""" ) , output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input( self ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs( self ):
"""simple docstring"""
pass
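# Hedged sketch (not part of the tests): MGP-STR tokenization is per-character,
# so a round trip is just char -> id -> char over the JSON vocab built in setUp.
def _sketch_char_round_trip(vocab_path, word="tester"):
    with open(vocab_path, encoding="utf-8") as f:
        vocab = json.load(f)
    inv = {i: ch for ch, i in vocab.items()}
    ids = [vocab[ch] for ch in word]
    return "".join(inv[i] for i in ids)  # == word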
| 346 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImgaImgPipeline( DiffusionPipeline ):
    def __init__( self , prior , image_encoder , image_processor , scheduler , renderer , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        '''Draw (or validate) initial latents and scale them by the scheduler sigma.'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        '''Offload submodels to CPU, moving them to GPU only while they run.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
    def _execution_device( self ):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        '''simple docstring'''
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , A_ , A_ = 1 , A_ = 25 , A_ = None , A_ = None , A_ = 4.0 , A_ = 64 , A_ = "pil" , A_ = True , ):
'''simple docstring'''
if isinstance(A_ , PIL.Image.Image ):
UpperCamelCase : Union[str, Any] = 1
elif isinstance(A_ , torch.Tensor ):
UpperCamelCase : str = image.shape[0]
elif isinstance(A_ , A_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCamelCase : Optional[Any] = len(A_ )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A_ )}""" )
UpperCamelCase : Dict = self._execution_device
UpperCamelCase : Dict = batch_size * num_images_per_prompt
UpperCamelCase : str = guidance_scale > 1.0
UpperCamelCase : str = self._encode_image(A_ , A_ , A_ , A_ )
# prior
self.scheduler.set_timesteps(A_ , device=A_ )
UpperCamelCase : Optional[Any] = self.scheduler.timesteps
UpperCamelCase : List[str] = self.prior.config.num_embeddings
UpperCamelCase : Union[str, Any] = self.prior.config.embedding_dim
UpperCamelCase : int = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCamelCase : Optional[int] = latents.reshape(latents.shape[0] , A_ , A_ )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : List[str] = self.scheduler.scale_model_input(A_ , A_ )
UpperCamelCase : List[str] = self.prior(
A_ , timestep=A_ , proj_embedding=A_ , ).predicted_image_embedding
# remove the variance
UpperCamelCase , UpperCamelCase : int = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
UpperCamelCase , UpperCamelCase : Optional[int] = noise_pred.chunk(2 )
UpperCamelCase : int = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCamelCase : Optional[int] = self.scheduler.step(
A_ , timestep=A_ , sample=A_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=A_ )
UpperCamelCase : Any = []
for i, latent in enumerate(A_ ):
print()
UpperCamelCase : Union[str, Any] = self.renderer.decode(
latent[None, :] , A_ , size=A_ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(A_ )
UpperCamelCase : List[str] = torch.stack(A_ )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
UpperCamelCase : str = images.cpu().numpy()
if output_type == "pil":
UpperCamelCase : List[str] = [self.numpy_to_pil(A_ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=A_ )
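# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the classifier-free guidance step inside
# the denoising loop above. The tensor shapes and the guidance scale here are
# illustrative assumptions, not values taken from the pipeline.
def _cfg_sketch(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # `noise_pred` stacks the unconditional and conditional predictions along
    # the batch dimension, exactly as produced by the doubled latents.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    # Push the prediction away from the unconditional branch.
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

if __name__ == "__main__":
    _out = _cfg_sketch(torch.randn(2, 1024, 1), guidance_scale=4.0)
    assert _out.shape == (1, 1024, 1)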
| 52 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        self.test()

    def test(self):
        """simple docstring"""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception("""update() does not fulfill the constraint.""" )
        if self.remaining() != 0:
            raise Exception("""Custom Constraint is not defined correctly.""" )

    @abstractmethod
    def advance(self):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance(self, token_id: int):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update(self, token_id: int):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset(self):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining(self):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy(self, stateful=False):
        """simple docstring"""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    '''simple docstring'''

    def __init__(self, token_ids: List[int]):
        """simple docstring"""
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """simple docstring"""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        """simple docstring"""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        """simple docstring"""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """simple docstring"""
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    '''simple docstring'''

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        """simple docstring"""
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                f''' {nested_token_ids}.''' )
        self.trie = root

    def next_tokens(self, current_seq):
        """simple docstring"""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf(self, current_seq):
        """simple docstring"""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """simple docstring"""
        next_nodes = list(root.values() )
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """simple docstring"""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    '''simple docstring'''

    def __init__(self, nested_token_ids: List[List[int]]):
        """simple docstring"""
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """simple docstring"""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        """simple docstring"""
        self.completed = False
        self.current_seq = []

    def remaining(self):
        """simple docstring"""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """simple docstring"""
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    '''simple docstring'''

    def __init__(self, constraints: List[Constraint]):
        """simple docstring"""
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """simple docstring"""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """simple docstring"""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """simple docstring"""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """simple docstring"""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any
            # constraint in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            """`constraint.update(token_id)` is not yielding incremental progress, """
                            """even though `constraint.does_advance(token_id)` is true.""" )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        """simple docstring"""
        new_state = ConstraintListState(self.constraints )  # we never actually mutate the self.constraints
        # objects throughout this process, so they are still in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
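# ---------------------------------------------------------------------------
# A small usage sketch for the classes above. The token ids are made up; in
# real use they would come from a tokenizer. It only exercises the local
# classes, so it runs as long as this module imports.
if __name__ == "__main__":
    # PhrasalConstraint: the tokens must appear exactly in this order.
    phrase = PhrasalConstraint([5, 6, 7])
    assert phrase.advance() == 5
    stepped, completed, reset = phrase.update(5)
    assert stepped and not completed

    # DisjunctiveConstraint: any one branch of the trie fulfills it.
    disjunctive = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    assert sorted(disjunctive.trie.next_tokens([1])) == [2, 4]

    # ConstraintListState tracks a whole bank of constraints during beam search.
    state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
    state.add(5)
    assert state.get_bank() > 0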
| 346 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = 0
@slow
def _lowerCamelCase ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__A ) , 0 )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
# Check that tokenizer_type ≠ model_type
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , config=__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def _lowerCamelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' , use_fast=__A )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' , use_fast=__A )
self.assertIsInstance(__A , __A )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : int ):
with pytest.raises(__A ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def _lowerCamelCase ( self : Optional[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCamelCase = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
if isinstance(__A , __A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __A )
else:
self.assertEqual(tokenizer.do_lower_case , __A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def _lowerCamelCase ( self : List[str] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__A , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__UpperCamelCase = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def _lowerCamelCase ( self : List[Any] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
            tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__A ) , __A )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __A )
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__A )
__UpperCamelCase = 'Hello, world. How are you?'
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertEqual('[UNK]' , tokens[0] )
__UpperCamelCase = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__A )
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(__A ) , __A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : Tuple ):
# Check we can load the tokenizer config of an online model.
__UpperCamelCase = get_tokenizer_config('bert-base-cased' )
__UpperCamelCase = config.pop('_commit_hash' , __A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__A , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCamelCase = get_tokenizer_config(__A )
self.assertDictEqual(__A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = get_tokenizer_config(__A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def _lowerCamelCase ( self : List[str] ):
try:
AutoConfig.register('custom' , __A )
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
__UpperCamelCase = CustomTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self : Optional[int] ):
try:
AutoConfig.register('custom' , __A )
# Can register in two steps
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__A , slow_tokenizer_class=__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
__UpperCamelCase = CustomTokenizerFast.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , use_fast=__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A , use_fast=__A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def _lowerCamelCase ( self : Any ):
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =False
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =NewTokenizer
SCREAMING_SNAKE_CASE_ : str =False
try:
AutoConfig.register('custom' , __A )
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A , use_fast=__A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def _lowerCamelCase ( self : int ):
with self.assertRaisesRegex(
__A , 'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase = AutoTokenizer.from_pretrained('bert-base' )
def _lowerCamelCase ( self : str ):
with self.assertRaisesRegex(
__A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase = AutoTokenizer.from_pretrained(__A , revision='aaaaaa' )
def _lowerCamelCase ( self : Dict ):
# Make sure we have cached the tokenizer.
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
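# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the `get_tokenizer_config` round-trip the
# tests above rely on: saving any tokenizer with `save_pretrained` produces a
# tokenizer_config.json that records the (slow) tokenizer class. The checkpoint
# name is illustrative, and the download needs network access.
if __name__ == "__main__":
    import tempfile as _tempfile
    from transformers import AutoTokenizer as _AutoTokenizer
    from transformers.models.auto.tokenization_auto import get_tokenizer_config as _get_tokenizer_config

    _tok = _AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
    with _tempfile.TemporaryDirectory() as _tmp_dir:
        _tok.save_pretrained(_tmp_dir)
        _config = _get_tokenizer_config(_tmp_dir)
        print(_config["tokenizer_class"])  # e.g. "BertTokenizer"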
| 53 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    '''simple docstring'''
    def analyze_directory(self, directory: Path, identifier: Union[str, None] = None, n_identifier: Union[List[str], None] = None, ignore_files: Union[str, List[str], None] = None, only_modules: bool = True, ):
        """simple docstring"""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""", file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers, module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ), 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(""".."""  / directory / file ), optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed, 0 )
    def test_modeling_examples(self):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files )

    def test_tokenization_examples(self):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory, identifier=identifier )

    def test_configuration_examples(self):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory, identifier=identifier )

    def test_remaining_examples(self):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifier = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory, n_identifier=n_identifier )

    def test_doc_sources(self):
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False )
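# ---------------------------------------------------------------------------
# Self-contained illustration of the doctest machinery the class above drives:
# `doctest.DocTestSuite` collects the examples embedded in docstrings and
# `unittest.TextTestRunner` executes them. Runs on its own, no transformers
# checkout needed.
if __name__ == "__main__":

    def add(a: int, b: int) -> int:
        """
        >>> add(2, 3)
        5
        """
        return a + b

    import sys

    suite = doctest.DocTestSuite(sys.modules[__name__])
    run_result = unittest.TextTestRunner().run(suite)
    assert len(run_result.failures) == 0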
| 346 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """simple docstring"""
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"""Matrix consists of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"""%{max_element_length}s"""

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another)})"""
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # where `self` holds A^(-1).
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa() -> None:
        '''simple docstring'''
        # a^(-1) starts as the identity
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"""a^(-1) is {ainv}""" )
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"""u is {u}""" )
        print(f"""v is {v}""" )
        print(f"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}""" )

    def testb() -> None:
        '''simple docstring'''
        import doctest

        doctest.testmod()

    testa()
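
    # A quick numeric check of the Sherman-Morrison identity above, added as a
    # sketch: with A = I (so `self` holds A^(-1) = I), the product
    # (I + u v^T) * sherman_morrison(u, v) should come back as the identity.
    def test_sherman_morrison_identity() -> None:
        eye = Matrix(3, 3, 0)
        for i in range(3):
            eye[i, i] = 1
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        product = (eye + u * v.transpose()) * eye.sherman_morrison(u, v)
        for r in range(3):
            for c in range(3):
                expected = 1 if r == c else 0
                assert abs(product[r, c] - expected) < 1e-9

    test_sherman_morrison_identity()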
| 54 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    '''simple docstring'''
    assert _test_patching.open is open
    mock = """__test_patch_submodule_builtin_mock__"""
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, """open""", mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    '''simple docstring'''
    mock = """__test_patch_submodule_missing_mock__"""
    with patch_submodule(_test_patching, """pandas.read_csv""", mock ):
        pass
def test_patch_submodule_missing_builtin():
    '''simple docstring'''
    mock = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, """len""", None ) is None
    with patch_submodule(_test_patching, """len""", mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    '''simple docstring'''
    mock = """__test_patch_submodule_start_and_stop_mock__"""
    patch = patch_submodule(_test_patching, """open""", mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    '''simple docstring'''
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = """__test_patch_submodule_successive_join__"""
    mock_dirname = """__test_patch_submodule_successive_dirname__"""
    mock_rename = """__test_patch_submodule_successive_rename__"""
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, """os.path.join""", mock_join ):
        with patch_submodule(_test_patching, """os.rename""", mock_rename ):
            with patch_submodule(_test_patching, """os.path.dirname""", mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching, """os.rename""", mock_rename ):
        with patch_submodule(_test_patching, """os.path.join""", mock_join ):
            with patch_submodule(_test_patching, """os.path.dirname""", mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    '''simple docstring'''
    mock = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching, """__module_that_doesn_exist__.__attribute_that_doesn_exist__""", mock ):
        pass
    with patch_submodule(_test_patching, """os.__attribute_that_doesn_exist__""", mock ):
        pass
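# ---------------------------------------------------------------------------
# Condensed reference for the behaviour exercised above: `patch_submodule`
# temporarily swaps an attribute reachable from a module, however that module
# imported it. A self-contained sketch reusing the `_test_patching` fixture:
def _sketch_patch_usage():
    sentinel = "__sketch_mock__"
    with patch_submodule(_test_patching, "os.path.join", sentinel):
        assert _test_patching.os.path.join is sentinel  # patched inside the block
    assert _test_patching.os.path.join is not sentinel  # restored afterwards

if __name__ == "__main__":
    _sketch_patch_usage()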
| 346 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/rembert""": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        """simple docstring"""
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model )

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text, sample=False):
        """simple docstring"""
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
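# ---------------------------------------------------------------------------
# Usage sketch for the tokenizer above. The checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP entry; loading it needs network access, so treat
# this as illustrative rather than a test.
if __name__ == "__main__":
    tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
    encoded = tokenizer("Hello world")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))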
| 55 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end) )
def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False

def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = f'''all({name}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number )
    print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
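
    # A randomized cross-check of all four implementations, added as a sketch
    # on top of the fixed test_data above (alphabet and lengths are arbitrary).
    import random
    import string

    for _ in range(1000):
        s = "".join(random.choices(string.ascii_lowercase[:3], k=random.randint(0, 8)))
        expected = s == s[::-1]
        assert is_palindrome(s) is expected
        assert is_palindrome_traversal(s) is expected
        assert is_palindrome_recursive(s) is expected
        assert is_palindrome_slice(s) is expected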
| 346 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : List[str] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case_ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
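
    # A minimal usage sketch (not part of the test suite; "albert-base-v2" is the public
    # checkpoint already referenced above) of the special-token layout exercised in
    # test_sequence_builders:
    #   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    #   ids = tok.build_inputs_with_special_tokens(tok.encode("hello", add_special_tokens=False))
    #   assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id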
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
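
# A minimal sketch of calling the underlying scorer directly, bypassing the `datasets`
# wrapper; `compute_bleu` is the function imported from nmt_bleu above, and the
# unpacking mirrors `_compute`:
#   predictions = [["hello", "there", "general", "kenobi"]]
#   references = [[["hello", "there", "general", "kenobi"]]]
#   bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
#       reference_corpus=references, translation_corpus=predictions, max_order=4, smooth=False
#   )
#   # bleu == 1.0 for an exact match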
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
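
# A minimal sketch (not part of the original module) of how the detection above behaves:
# is_sagemaker_model_parallel_available() only returns True when SM_HP_MP_PARAMETERS
# carries a "partitions" field, SM_FRAMEWORK_PARAMS enables MPI, and the `smdistributed`
# package is importable.
#   os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
#   os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
#   print(is_sagemaker_model_parallel_available())  # still False without smdistributed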
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
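
# Example invocation (script and folder names here are illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model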
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int,
                 num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
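
# A minimal usage sketch (hyperparameters below are illustrative, not taken from any
# released checkpoint): encode a batch of two 16-token note sequences.
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1, num_layers=12,
#       num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (2, 16))
#   mask = torch.ones(2, 16, dtype=torch.long)
#   encoded, mask = encoder(tokens, mask)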
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
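
# Usage sketch: the auto classes above dispatch on a checkpoint's config type, e.g.
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
# resolves to FlaxBertModel through FLAX_MODEL_MAPPING.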
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
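
# Example invocation for a fine-tuned CTC checkpoint (all paths and the script name here
# are illustrative):
#   python convert_wav2vec2_checkpoint.py --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-base-960h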
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def _snake_case ( _snake_case : Union[tf.Tensor, np.ndarray] ):
if isinstance(_snake_case , np.ndarray ):
return list(tensor.shape )
lowerCAmelCase : Any = tf.shape(_snake_case )
if tensor.shape == tf.TensorShape(_snake_case ):
return dynamic
lowerCAmelCase : Optional[int] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_snake_case )]
def _snake_case ( _snake_case : tf.Tensor , _snake_case : Optional[int] = None , _snake_case : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=_snake_case , name=_snake_case )
def _snake_case ( _snake_case : List[str] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Any=1E-5 , _snake_case : Union[str, Any]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_snake_case , _snake_case ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowerCAmelCase, lowerCAmelCase : Tuple = tf.nn.moments(_snake_case , axes=[axis] , keepdims=_snake_case )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowerCAmelCase : Any = [1] * inputs.shape.rank
lowerCAmelCase : Optional[Any] = shape_list(_snake_case )[axis]
lowerCAmelCase : Union[str, Any] = tf.reshape(_snake_case , _snake_case )
lowerCAmelCase : Tuple = tf.reshape(_snake_case , _snake_case )
# Compute layer normalization using the batch_normalization
# function.
lowerCAmelCase : Dict = tf.nn.batch_normalization(
_snake_case , _snake_case , _snake_case , offset=_snake_case , scale=_snake_case , variance_epsilon=_snake_case , )
return outputs
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int]=0 , _snake_case : Tuple=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowerCAmelCase : Optional[Any] = tf.shape(_snake_case )
lowerCAmelCase : Tuple = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowerCAmelCase : str = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_snake_case , _snake_case )
def _snake_case ( _snake_case : tf.Tensor ):
if not isinstance(_snake_case , tf.Tensor ):
lowerCAmelCase : Any = tf.convert_to_tensor(_snake_case ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowerCAmelCase : Optional[int] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowerCAmelCase : int = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowerCAmelCase : Any = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
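# Illustrative sketch: a (batch, seq) padding mask becomes a (batch, 1, 1, seq)
# additive mask -- 0 where attention is allowed, dtype.min where it is blocked,
# ready to be added to raw attention scores before the softmax.
def _example_extended_mask():
    mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])    # 1 = real token, 0 = pad
    extended = mask[:, None, None, :]
    return (1.0 - extended) * extended.dtype.min  # [[[[0., 0., 0., ~-3.4e38]]]]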
def _snake_case ( _snake_case : tf.Tensor , _snake_case : int , _snake_case : str = "input_ids" ):
tf.debugging.assert_less(
_snake_case , tf.cast(_snake_case , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(_snake_case )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : str ):
lowerCAmelCase : Tuple = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowerCAmelCase : Optional[Any] = [x for x in data if len(_snake_case ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
lowerCAmelCase : Optional[int] = np.asarray(_snake_case )
lowerCAmelCase : Tuple = 1
lowerCAmelCase : Optional[Any] = np.array_split(_snake_case , _snake_case )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowerCAmelCase : Optional[int] = np.array_split(_snake_case , _snake_case )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_snake_case ):
lowerCAmelCase : int = chunk_data
else:
lowerCAmelCase : List[Any] = data
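# Worked example (illustrative): the loop above finds the smallest number of
# chunks such that every chunk of attribute data fits under the 64512-byte
# HDF5 object-header limit.
def _example_chunk_count(data, limit=64512):
    arr = np.asarray(data)
    num_chunks = 1
    chunks = np.array_split(arr, num_chunks)
    while any(chunk.nbytes > limit for chunk in chunks):
        num_chunks += 1
        chunks = np.array_split(arr, num_chunks)
    return num_chunks  # e.g. 14 for 20000 strings of numpy dtype '<U11'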
def _snake_case ( _snake_case : str , _snake_case : Union[str, Any] ):
if name in group.attrs:
lowerCAmelCase : Union[str, Any] = [n.decode('''utf8''' ) if hasattr(_snake_case , '''decode''' ) else n for n in group.attrs[name]]
else:
lowerCAmelCase : Dict = []
lowerCAmelCase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_snake_case , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _snake_case ( _snake_case : Optional[int] ):
def _expand_single_ad_tensor(_snake_case : List[str] ):
if isinstance(_snake_case , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_snake_case , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _snake_case )
| 60 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n    of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n    correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout: maximum time in seconds a candidate program is allowed to run (Default: 3.0).\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor:
UpperCAmelCase__ = []
UpperCAmelCase__ = Counter()
UpperCAmelCase__ = 0
UpperCAmelCase__ = defaultdict(_UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
for candidate in candidates:
UpperCAmelCase__ = candidate + """\n""" + test_case
UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase )
futures.append(_UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_UpperCAmelCase ):
UpperCAmelCase__ = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
for result in results.values():
result.sort()
UpperCAmelCase__ = [r[1]["""passed"""] for r in result]
total.append(len(_UpperCAmelCase ) )
correct.append(sum(_UpperCAmelCase ) )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = k
UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
else:
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ )
return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
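# Worked example (illustrative): with n=5 samples of which c=2 pass,
# pass@1 = 1 - (3/4)*(4/5) = 0.4, exactly c/n; and pass@k = 1.0 as soon as
# n - c < k, since any draw of k samples must then include a passing one.
def _example_pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
# _example_pass_at_k(5, 2, 1) -> 0.4 ; _example_pass_at_k(5, 2, 5) -> 1.0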
| 346 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _a['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    _a = _LazyModule(__name__, globals()['__file__'], _a, module_spec=__spec__)
| 61 |
'''simple docstring'''
import math
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase__ = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
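# Readable restatement (illustrative) of the primality check above: trial
# division only needs odd candidates up to sqrt(number), since any composite
# number has a factor no larger than its square root.
def _example_primes_below(limit: int) -> list:
    def _is_prime(number: int) -> bool:
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0:
            return False
        return not any(number % i == 0 for i in range(3, int(math.sqrt(number)) + 1, 2))
    return [n for n in range(limit) if _is_prime(n)]
# _example_primes_below(20) -> [2, 3, 5, 7, 11, 13, 17, 19]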
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=1 , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = factor * value
UpperCAmelCase__ = value
while not is_prime(SCREAMING_SNAKE_CASE__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ )
return value
| 346 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=[] ):
__UpperCamelCase =size[0] - overlap_pixels * 2
__UpperCamelCase =size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__UpperCamelCase =np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__UpperCamelCase =np.pad(SCREAMING_SNAKE_CASE__ , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE__ , end_values=0 )
if "l" in remove_borders:
__UpperCamelCase =mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__UpperCamelCase =mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__UpperCamelCase =mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__UpperCamelCase =mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
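# Illustrative sketch of the mask built above: an 8x8 tile with a 2-pixel
# overlap gets a solid 255 core whose borders ramp linearly down to 0, so
# neighbouring upscaled tiles cross-fade instead of leaving visible seams.
def _example_blend_mask(size: int = 8, overlap: int = 2) -> np.ndarray:
    core = np.ones((size - 2 * overlap, size - 2 * overlap), dtype=np.uint8) * 255
    return np.pad(core, pad_width=overlap, mode="linear_ramp", end_values=0)
# Each row/column ramps 0 -> 255 over `overlap` pixels at the borders.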
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
return max(SCREAMING_SNAKE_CASE__ , min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : [int] ):
__UpperCamelCase =list(SCREAMING_SNAKE_CASE__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__UpperCamelCase =clamp_rect(SCREAMING_SNAKE_CASE__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE__ , (original_slice, 0) )
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =(original_image_slice * 4, 0, tile.size[0], tile.size[1])
__UpperCamelCase =tile.crop(SCREAMING_SNAKE_CASE__ )
return tile
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    divisor =n % d  # remainder past the nearest lower multiple of d
return n - divisor
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = 350 , ) -> Union[str, Any]:
super().__init__(
vae=A_ , text_encoder=A_ , tokenizer=A_ , unet=A_ , low_res_scheduler=A_ , scheduler=A_ , max_noise_level=A_ , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__UpperCamelCase =add_overlap_rect(A_ , A_ , image.size )
__UpperCamelCase =image.crop(A_ )
__UpperCamelCase =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__UpperCamelCase =translated_slice_x - (original_image_slice / 2)
__UpperCamelCase =max(0 , A_ )
__UpperCamelCase =squeeze_tile(A_ , A_ , A_ , A_ )
__UpperCamelCase =to_input.size
__UpperCamelCase =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__UpperCamelCase =super(A_ , self ).__call__(image=A_ , **A_ ).images[0]
__UpperCamelCase =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__UpperCamelCase =unsqueeze_tile(A_ , A_ )
__UpperCamelCase =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__UpperCamelCase =[]
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__UpperCamelCase =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=A_ ) , mode='L' , )
final_image.paste(
A_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , A_ )
@torch.no_grad()
def __call__( self , A_ , A_ , A_ = 75 , A_ = 9.0 , A_ = 50 , A_ = None , A_ = 1 , A_ = 0.0 , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 128 , A_ = 32 , A_ = 32 , ) -> Tuple:
__UpperCamelCase =Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__UpperCamelCase =math.ceil(image.size[0] / tile_size )
__UpperCamelCase =math.ceil(image.size[1] / tile_size )
__UpperCamelCase =tcx * tcy
__UpperCamelCase =0
for y in range(A_ ):
for x in range(A_ ):
self._process_tile(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , prompt=A_ , num_inference_steps=A_ , guidance_scale=A_ , noise_level=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _UpperCAmelCase ( ):
# Run a demo
__UpperCamelCase ='stabilityai/stable-diffusion-x4-upscaler'
__UpperCamelCase =StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='fp16' , torch_dtype=torch.floataa )
__UpperCamelCase =pipe.to('cuda' )
__UpperCamelCase =Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE__ : List[str] ):
print(F'progress: {obj["progress"]:.4f}' )
obj["image"].save('diffusers_library_progress.jpg' )
__UpperCamelCase =pipe(image=SCREAMING_SNAKE_CASE__ , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE__ )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 62 |
'''simple docstring'''
import string
from math import logaa
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
UpperCAmelCase__ = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCAmelCase__ = corpus_without_punctuation.split("""\n""" )
UpperCAmelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return round(tf * idf , 3 )
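# Worked example (illustrative): 3 documents, 2 of which contain the term, and
# 4 occurrences of the term in the scored document. The file's `logaa` import
# corresponds to math.log10 in the original source.
def _example_tf_idf() -> float:
    from math import log10
    idf_value = round(log10(3 / 2), 3)  # df=2 of n=3 docs -> idf = 0.176
    return round(4 * idf_value, 3)      # tf=4 -> tf-idf = 0.704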
| 346 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
lowerCAmelCase_ : List[Any] = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
lowerCAmelCase_ : Dict = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase_ : Optional[Any] = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 63 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(
description=(
        'Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase_ = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase_ = model.state_dict()
UpperCAmelCase_ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase_ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"]
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
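# Sketch of the layer selection above (illustrative): the six teacher layers
# [0, 2, 4, 7, 9, 11] are renumbered consecutively in the distilled student,
# so teacher layer 7 becomes student layer 3, and so on.
teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 1_1])}
# -> {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}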
| 346 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "ViTImageProcessor"
lowercase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: str, a_: Optional[int]=None, a_: Dict=None, **a_: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""", a_, )
_snake_case : Tuple = kwargs.pop("""feature_extractor""" )
_snake_case : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_, a_ )
def __call__( self: List[str], a_: Union[str, Any]=None, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, **a_: Dict ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
_snake_case : Dict = self.tokenizer(a_, return_tensors=a_, **a_ )
if visual_prompt is not None:
_snake_case : List[Any] = self.image_processor(a_, return_tensors=a_, **a_ )
if images is not None:
_snake_case : Tuple = self.image_processor(a_, return_tensors=a_, **a_ )
if visual_prompt is not None and images is not None:
_snake_case : Any = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_snake_case : Optional[Any] = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a_ ), tensor_type=a_ )
def UpperCamelCase_ ( self: List[Any], *a_: Tuple, **a_: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: Any, *a_: Union[str, Any], **a_: str ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
| 64 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,)
lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCAmelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 346 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 65 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """vivit"""
def __init__( self : List[str] , _UpperCAmelCase : List[Any]=2_24 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Any=[2, 16, 16] , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu_fast" , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=1E-06 , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : List[Any] , ):
"""simple docstring"""
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_frames
UpperCAmelCase__ = tubelet_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
super().__init__(**_UpperCAmelCase )
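# Illustrative usage sketch: this config mirrors `transformers.VivitConfig`
# (an assumption based on the model_type and checkpoint referenced above), so
# a shorter clip at lower resolution could be described like this:
def _example_vivit_config():
    from transformers import VivitConfig
    config = VivitConfig(num_frames=16, image_size=112)
    return config.hidden_size, config.tubelet_size  # (768, [2, 16, 16])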
| 346 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be used in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowercase )
return [m.group(0 ) for m in matches]
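# Worked example (illustrative): the lookaround regex above splits camel-cased
# class names on lower->upper boundaries and before the final capital of an
# acronym run, e.g. "TFBertModel" -> ['TF', 'Bert', 'Model'] and
# "GPT2LMHeadModel" -> ['GPT2LM', 'Head', 'Model'].
def _example_camel_case_split(name: str) -> list:
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", name)
    return [m.group(0) for m in matches]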
def A_ ( ):
'''simple docstring'''
snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ :Dict = {
config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case_ :Optional[Any] = collections.defaultdict(_lowercase )
snake_case_ :int = collections.defaultdict(_lowercase )
snake_case_ :List[str] = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase ):
snake_case_ :int = None
if _re_tf_models.match(_lowercase ) is not None:
snake_case_ :int = tf_models
snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
snake_case_ :List[Any] = flax_models
snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
snake_case_ :Optional[Any] = pt_models
snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case_ :Optional[int] = True
break
# Try again after removing the last word in the name
snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case_ :Optional[Any] = list(_lowercase )
all_models.sort()
snake_case_ :Optional[int] = {"""model_type""": all_models}
snake_case_ :Optional[int] = [pt_models[t] for t in all_models]
snake_case_ :Any = [tf_models[t] for t in all_models]
snake_case_ :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
snake_case_ :Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case_ :Optional[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case_ :Tuple = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case_ :Tuple = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case_ :str = """AutoTokenizer"""
snake_case_ :int = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase, _lowercase ):
continue
# First extract all model_names
snake_case_ :Tuple = []
for name in getattr(_lowercase, _lowercase ).values():
if isinstance(_lowercase, _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = get_frameworks_table()
snake_case_ :str = Dataset.from_pandas(_lowercase )
snake_case_ :List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase )
snake_case_ :List[str] = Dataset.from_json(_lowercase )
snake_case_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
snake_case_ :Tuple = sorted(table.keys() )
snake_case_ :Tuple = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case_ :Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case_ :List[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS
snake_case_ :List[str] = []
for key in pipeline_tasks:
if key not in in_table:
snake_case_ :int = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase, (list, tuple) ):
snake_case_ :Any = model[0]
snake_case_ :str = model.__name__
if model not in in_table.values():
missing.append(_lowercase )
if len(_lowercase ) > 0:
snake_case_ :Optional[int] = """, """.join(_lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__a = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 66 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 346 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> float:
if digit_amount > 0:
return round(number - int(UpperCamelCase__ ) , UpperCamelCase__ )
return number - int(UpperCamelCase__ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 67 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
UpperCAmelCase__ = jieba
UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if self.remove_space:
UpperCAmelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ = inputs
UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase )
UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
UpperCAmelCase__ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase )
UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
UpperCAmelCase__ = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ = cur_pieces[1:]
else:
UpperCAmelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
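        # XLNet-style format: "A <sep> <cls>" for a single sequence and
        # "A <sep> B <sep> <cls>" for a pair, with <cls> placed at the end.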
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
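        # the trailing <cls> token gets segment id 2, following the XLNet convention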
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 346 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1 , SCREAMING_SNAKE_CASE_: int = 1_0_0_0 ) -> int:
'''simple docstring'''
A__ = 1
A__ = 0
for divide_by_number in range(SCREAMING_SNAKE_CASE_ , digit + 1 ):
A__ = []
A__ = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(SCREAMING_SNAKE_CASE_ ):
A__ = len(SCREAMING_SNAKE_CASE_ )
A__ = divide_by_number
else:
has_been_divided.append(SCREAMING_SNAKE_CASE_ )
A__ = now_divide * 1_0 % divide_by_number
return the_digit
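# Quick sanity check (illustration only): among denominators d < 10, the unit
# fraction 1/d with the longest recurring decimal cycle is 1/7 = 0.(142857),
# so the call below would print 7.
# print(lowerCAmelCase__(1, 10))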
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ = logging.getLogger(__name__)
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
UpperCAmelCase__ = parser.parse_args()
return args
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return tokenizer(examples["""text"""] )
return fn
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
UpperCAmelCase__ = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
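# A minimal sketch of reading the shards back (an illustration, not part of the
# original script): feature names mirror get_serialized_examples above, and
# VarLenFeature is an assumption since sequence lengths may vary per example.
def parse_serialized_example(serialized_example):
    feature_description = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    return tf.io.parse_single_example(serialized_example, feature_description)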
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
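    # Toy illustration (made-up token ids): with max_length = 4, the rows
    # [[1, 2, 3], [4, 5, 6, 7, 8]] are concatenated to [1, 2, 3, 4, 5, 6, 7, 8]
    # and re-split into [[1, 2, 3, 4], [5, 6, 7, 8]]; a trailing remainder
    # shorter than max_length would be dropped.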
def group_texts(SCREAMING_SNAKE_CASE__ : int ):
# Concatenate all texts.
UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase__ = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] )
UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase__ = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
main(args)
| 346 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
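# Keys containing "*" are layer templates: at load time the "*" is replaced by
# the encoder layer index parsed from the fairseq parameter name, e.g.
# "encoder.layers.*.self_attn.linear_k" -> "encoder.layers.3.self_attn.linear_k".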
__UpperCamelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for attribute in key.split('.' ):
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
elif weight_type == "inv_freq":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , UpperCAmelCase )
if "pos_bias_u" in name:
snake_case_ = None
elif "pos_bias_v" in name:
snake_case_ = None
elif "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name:
snake_case_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = 'weight'
elif "running_mean" in name:
snake_case_ = 'running_mean'
elif "inv_freq" in name:
snake_case_ = 'inv_freq'
elif "running_var" in name:
snake_case_ = 'running_var'
elif "num_batches_tracked" in name:
snake_case_ = 'num_batches_tracked'
else:
snake_case_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) -> str:
if config_path is not None:
snake_case_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase , hidden_act='swish' )
else:
snake_case_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
snake_case_ = 'rotary'
if is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
snake_case_ = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase , )
snake_case_ = True if config.feat_extract_norm == 'layer' else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
snake_case_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
snake_case_ = WavaVecaConformerForCTC(UpperCAmelCase )
else:
snake_case_ = WavaVecaConformerForPreTraining(UpperCAmelCase )
if is_finetuned:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task='audio_pretraining' )
snake_case_ = fairseq.tasks.setup_task(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
snake_case_ = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 69 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase_ = '\\n\n'
UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ):
"""simple docstring"""
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCAmelCase__ = """cuda"""
else:
UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ = model.to(_UpperCAmelCase )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
UpperCAmelCase__ = encodings["""input_ids"""]
UpperCAmelCase__ = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
| 346 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A__ : Tuple =logging.get_logger(__name__)
A__ : List[Any] ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A__ : List[Any] =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
if weight_type is not None:
_lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase ).shape
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "running_mean":
_lowerCAmelCase = value
elif weight_type == "running_var":
_lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
_lowerCAmelCase = value
elif weight_type == "inv_freq":
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(lowerCAmelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , lowerCAmelCase )
if "pos_bias_u" in name:
_lowerCAmelCase = None
elif "pos_bias_v" in name:
_lowerCAmelCase = None
elif "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
elif "running_mean" in name:
_lowerCAmelCase = """running_mean"""
elif "inv_freq" in name:
_lowerCAmelCase = """inv_freq"""
elif "running_var" in name:
_lowerCAmelCase = """running_var"""
elif "num_batches_tracked" in name:
_lowerCAmelCase = """num_batches_tracked"""
else:
_lowerCAmelCase = None
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True ):
"""simple docstring"""
if config_path is not None:
_lowerCAmelCase = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase , hidden_act="""swish""" )
else:
_lowerCAmelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCAmelCase = """rotary"""
if is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
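            # intent: "<pad>" ends up at index 0 and "<s>" at index 1 for CTC
            # (an inference from the comment above)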
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase , )
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
_lowerCAmelCase = WavaVecaConformerForCTC(lowerCAmelCase )
else:
_lowerCAmelCase = WavaVecaConformerForPreTraining(lowerCAmelCase )
if is_finetuned:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
A__ : str =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
A__ : List[Any] =parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 70 |
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 1000000 ):
'''simple docstring'''
UpperCAmelCase__ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
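# The loop above is a totient sieve: phi[i] starts at i - 1 (already correct for
# primes), and each prime i (detected by phi[i] == i - 1) reduces phi[j] at its
# multiples by phi[j] // i, Euler's (1 - 1/i) factor. The sum of phi(2..limit)
# counts the reduced proper fractions with denominator <= limit
# (Project Euler problem 72 for limit = 1_000_000).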
if __name__ == "__main__":
print(solution())
| 346 | 0 |
def A ( a_ ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__UpperCamelCase : Any =sorted(string.lower() )
return len(a_ ) == len(set(a_ ) )
if __name__ == "__main__":
A_ :Any = input('''Enter a string ''').strip()
A_ :Union[str, Any] = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 71 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ):
"""simple docstring"""
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ):
"""simple docstring"""
UpperCAmelCase__ = {}
if top_k is not None:
UpperCAmelCase__ = top_k
return {}, {}, postprocess_params
def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ):
"""simple docstring"""
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = load_image(_UpperCAmelCase )
UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model(**_UpperCAmelCase )
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase )
elif self.framework == "tf":
UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase__ = scores.tolist()
UpperCAmelCase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
| 346 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
if isinstance(A_, collections.abc.Iterable ):
return x
return (x, x)
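# (called as to_atuple below: scalar config values such as image_size or
# patch_size are normalized to (height, width) pairs before computing the
# number of patches)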
@require_flax
class __snake_case :
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(__lowerCAmelCase , __lowerCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int=None , **__lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = after_output[0]
_lowerCamelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Any = model(
input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_attentions=__lowerCAmelCase )
_lowerCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : str = to_atuple(vision_model.config.image_size )
_lowerCamelCase : List[Any] = to_atuple(vision_model.config.patch_size )
_lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCamelCase : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(__lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
pt_model.to(__lowerCAmelCase )
pt_model.eval()
# prepare inputs
_lowerCamelCase : str = inputs_dict
_lowerCamelCase : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowerCamelCase : List[Any] = pt_model(**__lowerCAmelCase ).to_tuple()
_lowerCamelCase : List[Any] = fx_model(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
_lowerCamelCase : List[Any] = fx_model_loaded(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = VisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_flax=__lowerCAmelCase )
pt_model_loaded.to(__lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
_lowerCamelCase : Dict = pt_model_loaded(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = VisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : int = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCAmelCase )
_lowerCamelCase : Tuple = fx_state
self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = VisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : str = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = self.prepare_config_and_inputs()
self.check_save_load(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowerCAmelCase )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = config_inputs_dict.pop('''vision_config''' )
_lowerCamelCase : Dict = config_inputs_dict.pop('''text_config''' )
_lowerCamelCase : Tuple = config_inputs_dict
self.check_equivalence_pt_to_flax(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.check_equivalence_flax_to_pt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_pretrained_model_and_inputs()
_lowerCamelCase : int = model_a(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model_a(**__lowerCAmelCase )
_lowerCamelCase : Tuple = after_outputs[0]
_lowerCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-5 )
@require_flax
class __snake_case ( _lowercase , unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = 1_3
_lowerCamelCase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase : Tuple = random_attention_mask([batch_size, 4] )
_lowerCamelCase : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = FlaxViTModel(__lowerCAmelCase )
_lowerCamelCase : List[str] = FlaxBertModel(__lowerCAmelCase )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = FlaxViTModelTester(self )
_lowerCamelCase : Optional[Any] = FlaxBertModelTester(self )
_lowerCamelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase : List[Any] = vision_config_and_inputs
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __snake_case ( _lowercase , unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , )
_lowerCamelCase : List[str] = 1_3
_lowerCamelCase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase : str = random_attention_mask([batch_size, 4] )
_lowerCamelCase : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __snake_case ( unittest.TestCase):
@slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
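# Background sketch (added; not part of the test): dual-encoder logits are
# scaled cosine similarities. Assuming `logit_scale_init_value` stores the
# *log* of the scale, as in CLIP, a value of 1.0 means a factor of exp(1.0):
#
#     img /= np.linalg.norm(img, axis=-1, keepdims=True)
#     txt /= np.linalg.norm(txt, axis=-1, keepdims=True)
#     logits_per_image = np.exp(1.0) * img @ txt.T  # shape (n_images, n_texts)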
| 72 |
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    n = 2 * n
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
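# Cross-check (added; not in the original file): the closed form above is the
# central binomial coefficient C(2n, n), which math.comb computes exactly,
# avoiding the float division. Requires Python 3.8+.
from math import comb


def solution_comb(n: int = 20) -> int:
    # C(2n, n) counts the monotone lattice paths through an n x n grid.
    return comb(2 * n, n)


assert solution_comb(20) == 137846528820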
| 346 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"
    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 73 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
| 346 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 74 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = False
while not completed:
if counter == 1:
self.reset()
UpperCAmelCase__ = self.advance()
if not self.does_advance(_UpperCAmelCase ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
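# Illustration (added): with nested_token_ids = [[1, 2, 3], [1, 2, 4]] the trie
# shares the prefix [1, 2] and branches afterwards, so
# DisjunctiveTrie([[1, 2, 3], [1, 2, 4]]).next_tokens([1, 2]) returns [3, 4].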
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never touch the self.constraints objects
        # throughout this process, so they stay in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
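# Hedged usage sketch (added; not part of this module): these classes power
# constrained beam search in `generate()`. The model choice and forced phrase
# below are placeholders for illustration.
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#
#     tok = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     constraint = PhrasalConstraint(tok("Sie", add_special_tokens=False).input_ids)
#     prompt = tok("translate English to German: How old are you?", return_tensors="pt")
#     out = model.generate(**prompt, constraints=[constraint], num_beams=4)
#     print(tok.decode(out[0], skip_special_tokens=True))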
| 346 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
a_ : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
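# Example invocation (added; the script file name and paths are placeholders):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base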
| 75 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase_ = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_identifiers(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_docs(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
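# Minimal standalone illustration (added) of the two doctest entry points used
# above: DocTestSuite collects docstring examples from a module; testfile runs
# a text file of examples.
if __name__ == "__main__":

    def _add(a: int, b: int) -> int:
        """
        >>> _add(2, 3)
        5
        """
        return a + b

    demo_suite = doctest.DocTestSuite()  # defaults to the calling module
    demo_result = unittest.TextTestRunner().run(demo_suite)
    assert len(demo_result.failures) == 0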
| 346 | 0 |
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
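# Quick sanity check (added; not in the original file): from the corner of a
# 5 x 5 board a knight has exactly two legal onward moves.
assert get_valid_pos((0, 0), 5) == [(1, 2), (2, 1)]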
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # "pandas.read_csv" is not present in _test_patching's globals
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
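# For comparison (added; not part of this test suite): the standard library
# offers a similar temporary-patching pattern via unittest.mock.patch.
def test_stdlib_mock_patch_analogue():
    import os
    from unittest.mock import patch as mock_patch

    with mock_patch("os.path.join", lambda *parts: "/".join(parts)):
        assert os.path.join("a", "b") == "a/b"
    assert os.path.join("a", "b") == os.sep.join(["a", "b"])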
| 346 | 0 |
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = 8.3_1_4_4_5_9_8
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_UpperCamelCase : List[Any] = 3_00
_UpperCamelCase : Tuple = 28
_UpperCamelCase : Any = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
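# Worked example (added): lighter gases move faster at the same temperature.
# For helium (molar mass ~0.004 kg/mol) at 300 K:
#   vrms = sqrt(3 * 8.3144598 * 300 / 0.004) ~= 1368 m/s
# versus roughly 517 m/s for nitrogen above.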
| 77 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
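# Practical variant (added; not benchmarked above): real-world inputs such as
# "A man, a plan, a canal: Panama!" need normalization before any of the
# checks; a minimal sketch built on the fastest (slice) version:
def is_palindrome_normalized(s: str) -> bool:
    cleaned = "".join(ch.casefold() for ch in s if ch.isalnum())
    return cleaned == cleaned[::-1]


assert is_palindrome_normalized("A man, a plan, a canal: Panama!")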
| 346 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class SpeechToTextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
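# Minimal sketch (added; standalone, hypothetical ids) of the special-token
# layout produced by build_inputs_with_special_tokens above: with a target
# language set, prefix_tokens holds the language-code id, so ids [5, 6]
# become [lang_id, 5, 6, eos_id].
def _demo_build_with_specials(prefix_tokens, token_ids, eos_token_id):
    return prefix_tokens + token_ids + [eos_token_id]


assert _demo_build_with_specials([10], [5, 6], 2) == [10, 5, 6, 2]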
| 78 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 346 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # id of "[s]" in the char vocab
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
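
# --- Hedged usage sketch (not part of the original module): how this processor is
# typically driven end to end. The checkpoint name is an assumption based on the
# publicly released MGP-STR weights; left commented out since it downloads a model.
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   from PIL import Image
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=Image.open("word.png"), return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   # `outputs.logits` is a (char, bpe, wp) tuple; batch_decode keeps the best of the three.
#   print(processor.batch_decode(outputs.logits)["generated_text"])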
| 79 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    '''simple docstring'''

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 346 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
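
# --- Hedged usage sketch (not part of the original tests): generating one spectrogram
# image and its audio from the same pretrained checkpoint the integration test uses.
# The call runs a full diffusion loop, so it is left commented out here.
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe(generator=torch.Generator().manual_seed(42))
#   output.images[0].save("spectrogram.png")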
| 80 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ):
        """simple docstring"""
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """simple docstring"""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
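
# --- Hedged usage sketch (not part of the original module): the hyperparameters below
# are small, made-up values chosen only to illustrate the expected tensor shapes; the
# module cannot be run standalone because of its relative imports.
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=64, vocab_size=100, d_model=32, dropout_rate=0.1, num_layers=2,
#       num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 100, (1, 64))
#   mask = torch.ones((1, 64), dtype=torch.long)
#   hidden, mask_out = encoder(tokens, mask)   # hidden: (batch, seq, d_model) == (1, 64, 32)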
| 346 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x, y, max_step):
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
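

# --- Hedged worked example (not in the original file): the origin never diverges, so
# the loop runs all `max_step` iterations and the normalized distance is 1.0, while a
# point far outside the set diverges on the first step and yields 0.0.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(3, 3, 50) == 0.0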
def get_black_and_white_rgb(distance):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width=800, image_height=600, figure_center_x=-0.6, figure_center_y=0, figure_width=3.2, max_step=50, use_distance_color_coding=True, ):
    """simple docstring"""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase_ : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 81 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    '''simple docstring'''
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def rename_dict(key, value, full_name, weight_type, hf_dict):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
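
# --- Hedged usage note (not part of the original script): a typical invocation for a
# fine-tuned CTC checkpoint; every path below is a placeholder.
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h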
| 346 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
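

# --- Hedged usage sketch (not in the original file): Kosaraju's algorithm on the two
# test graphs above. The grouping of vertices is what matters; the exact ordering of
# the lists may differ from the examples shown in the comments.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]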
| 82 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n    title={Evaluating Large Language Models Trained on Code},\n    author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n    year={2021},\n    eprint={2107.03374},\n    archivePrefix={arXiv},\n    primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                } ), homepage="https://github.com/openai/human-eval", codebase_urls=["https://github.com/openai/human-eval"], reference_urls=["https://github.com/openai/human-eval"], license=_LICENSE, )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """simple docstring"""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    '''simple docstring'''

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
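

# --- Hedged worked example (not in the original file): with n=2 samples for a task and
# c=1 of them correct, the unbiased estimator gives pass@1 = 1 - (1 - 1/2) = 0.5,
# matching the docstring example above. Left as a comment because this module's
# relative import prevents it from running standalone.
#
#   estimate_pass_at_k(np.array([2]), np.array([1]), 1)   # -> array([0.5])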
| 346 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        '''simple docstring'''
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1E-5)

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
    def test_determinism(self):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
    def test_save_load_fast_init_to_base(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
    def test_model_outputs_equivalence(self):
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        '''simple docstring'''
        pass

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        '''simple docstring'''
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1E-4))
| 83 |
'''simple docstring'''
import math
def is_prime(number):
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    '''simple docstring'''
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
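

# --- Hedged usage sketch (not in the original file): 14 is composite, so the search
# walks up through 15 and 16 to the next prime, 17.
if __name__ == "__main__":
    assert is_prime(17)
    assert next_prime(14) == 17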
| 346 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self , __A , __A , __A , __A , __A = 1.0 , __A = True , __A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowerCAmelCase_ :Union[str, Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase_ :Optional[int] = jnp.flip(__A , axis=1 )
# 1. time
if not isinstance(__A , jnp.ndarray ):
lowerCAmelCase_ :List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ :str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ :Union[str, Any] = jnp.expand_dims(__A , 0 )
lowerCAmelCase_ :List[Any] = self.time_proj(__A )
lowerCAmelCase_ :Optional[Any] = self.time_embedding(__A )
# 2. pre-process
lowerCAmelCase_ :int = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[Any] = self.conv_in(__A )
lowerCAmelCase_ :Union[str, Any] = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[str] = self.controlnet_cond_embedding(__A )
sample += controlnet_cond
# 3. down
lowerCAmelCase_ :Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
lowerCAmelCase_ , lowerCAmelCase_ :Any = down_block(__A , __A , __A , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase_ :int = self.mid_block(__A , __A , __A , deterministic=not train )
        # 5. controlnet blocks
lowerCAmelCase_ :Dict = ()
for down_block_res_sample, controlnet_block in zip(__A , self.controlnet_down_blocks ):
lowerCAmelCase_ :Union[str, Any] = controlnet_block(__A )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ :Optional[Any] = controlnet_down_block_res_samples
lowerCAmelCase_ :List[Any] = self.controlnet_mid_block(__A )
# 6. scaling
lowerCAmelCase_ :List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__A , mid_block_res_sample=__A )
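# Usage sketch (an addition, not part of the original file): driving a Flax
# ControlNet through the public diffusers API. The checkpoint id and tensor
# shapes below are illustrative assumptions (Stable Diffusion v1-style latents).
def _example_controlnet_forward():
    import jax.numpy as jnp
    from diffusers import FlaxControlNetModel

    controlnet, params = FlaxControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
    )
    return controlnet.apply(
        {"params": params},
        jnp.zeros((1, 4, 64, 64)),    # noisy latents (NCHW)
        jnp.array([999]),             # timestep
        jnp.zeros((1, 77, 768)),      # text encoder hidden states
        jnp.zeros((1, 3, 512, 512)),  # conditioning image
        return_dict=False,
    )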
| 84 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of documents in `corpus` containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the product tf * idf, rounded to 3 decimal places."""
    return round(tf * idf, 3)
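# Worked example (illustrative data, not from the original file): tf-idf for the
# term "example" against a tiny two-document corpus.
if __name__ == "__main__":
    corpus = "this is an example document\nthis document has no match"
    tf = term_frequency("example", "this is an example document")  # -> 1
    df, n = document_frequency("example", corpus)  # -> (1, 2)
    idf = inverse_document_frequency(df, n)  # round(log10(2 / 1), 3) = 0.301
    print(tf_idf(tf, idf))  # -> 0.301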
| 346 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match if a non-binary `open(...)` call is missing an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return a match for any `print(...)` call that is not inside a comment or a docstring."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
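# Quick sanity check (an illustrative addition, not part of the original tests):
# the encoding regex flags text-mode open() calls that lack an explicit encoding= kwarg.
if __name__ == "__main__":
    pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert pattern.search(' open("data.txt")') is not None  # flagged: no encoding
    assert pattern.search(' open("data.txt", encoding="utf-8")') is None  # ok
    assert pattern.search(' open("data.bin", "rb")') is None  # ok: binary mode
    print("regex behaves as expected")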
| 85 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
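    # Example invocation (the script filename is an assumption; the default dump
    # path comes from the argument definitions above):
    #   python extract_distilbert.py \
    #       --model_type bert \
    #       --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
    #       --vocab_transform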
| 346 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
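# Usage sketch for the AlbertTokenizerFast class defined above (an addition,
# not part of the original file; requires hub access, and "albert-base-v2"
# appears in the pretrained maps above):
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    enc = tokenizer("A first sentence.", "A second sentence.")
    # token_type_ids are 0 over [CLS] + segment one + [SEP] and 1 over segment
    # two + [SEP], mirroring create_token_type_ids_from_sequences above.
    print(enc["input_ids"], enc["token_type_ids"])
| 86 |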
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
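# Inference sketch (an addition mirroring full_loop above; the constant residual
# stands in for a real denoising model): PNDM runs Runge-Kutta (PRK) warm-up
# steps followed by linear multistep (PLMS) steps.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        sample = scheduler.step_prk(0.1 * sample, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        sample = scheduler.step_plms(0.1 * sample, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])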
| 346 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class snake_case_ ( unittest.TestCase ):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # the exact black target version is an assumption; the original dump only shows "PYaa"
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 87 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
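# Quick sketch using the defaults above (an addition, not part of the original
# file): a video clip is split into tubelets of size tubelet_size = [t, h, w],
# so the token count per clip is (num_frames // t) * (image_size // h) * (image_size // w).
if __name__ == "__main__":
    config = VivitConfig()
    t, h, w = config.tubelet_size
    num_patches = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    print(num_patches)  # 16 * 14 * 14 = 3136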
| 346 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
@slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
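if __name__ == "__main__":
    # Minimal check (an illustrative addition, not part of the original test):
    # add_noise implements x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
    # which is why DDPM and DDIM (sharing one beta schedule) noise images identically.
    sched = DDPMScheduler(num_train_timesteps=1000)
    x0, eps, t = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8), torch.tensor([10])
    xt = sched.add_noise(x0, eps, t)
    abar = sched.alphas_cumprod[10]
    manual = abar.sqrt() * x0 + (1 - abar).sqrt() * eps
    print(torch.allclose(xt, manual, atol=1e-6))  # True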
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 346 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None,
                            no_split_module_classes=None, max_memory=None, offload_folder=None,
                            offload_state_dict=False):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
# convert param to the right dtype
_a : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_a : Any = name.replace('.weight' , '' ).replace('.bias' , '' )
_a : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase_ ):
param.to(lowerCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map,
            max_memory=max_memory, no_split_module_classes=no_split_module_classes,
        )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_a : Any = True
_a : Union[str, Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
if device_map is None:
if torch.cuda.is_available():
_a : List[Any] = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_a : Union[str, Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_a : int = {}
_a : str = special_dtypes
_a : Tuple = no_split_module_classes
_a : Optional[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_a : Optional[Any] = get_balanced_memory(
lowerCAmelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a : Optional[Any] = max_memory
_a : Any = infer_auto_device_map(lowerCAmelCase_ , **lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# check if don't have any quantized module on the cpu
_a : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_a : Tuple = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None,
                        has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fpaa_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"),
                offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fpaa_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
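def _example_usage():
    """Hedged sketch (an addition) of the public entry points that wrap the helpers above.

    Assumes a CUDA GPU, bitsandbytes installed, and a local checkpoint folder;
    the "/path/to/checkpoint" location is a placeholder, not a real path.
    """
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))
    bnb_config = BnbQuantizationConfig(load_in_8bit=True)
    return load_and_quantize_model(
        empty_model, bnb_config, weights_location="/path/to/checkpoint", device_map="auto"
    )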
| 89 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
                 mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        """Decode ids to text, then undo the CPM-style whitespace encoding."""
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
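    # Illustrative decode round trip (hypothetical pieces): after the plain spaces
    # between pieces are stripped, "abc\u2582def\u2583ghi" becomes "abc def\nghi",
    # i.e. "\u2582" stands for a space and "\u2583" for a newline in the vocab.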
| 346 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1024 ):
    """Greedily merge adjacent (src, tgt) pairs until the tokenized length would exceed max_tokens."""
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors='pt' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
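# Illustrative sketch: if two adjacent pairs fit under max_tokens, they are merged:
#   pack_examples(tok, ["a b", "c", "long doc"], ["x", "y", "z"], max_tokens)
#   may return (["a b c", "long doc"], ["x y", "z"]), depending on the tokenizer.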
def pack_data_dir(tok , data_dir: Path , max_tokens , save_path ):
    """Pack the train split of a seq2seq data directory and copy val/test through unchanged."""
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def packer_cli():
    """CLI entry point: parse arguments, load the tokenizer, and pack the data directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=128 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
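# Example invocation (script name and paths are illustrative):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 512 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed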
| 90 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    '''Parse the command-line arguments for the TFRecord preparation script.'''
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=1000 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    '''Return a closure that tokenizes the "text" column of a batch of examples.'''
    def fn(examples ):
        return tokenizer(examples["""text"""] )
    return fn
def get_serialized_examples(tokenized_data ):
    '''Serialize tokenized rows into tf.train.Example byte strings.'''
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args ):
    '''Load, tokenize, chunk, and serialize the dataset into TFRecord shards.'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
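    # Illustrative sketch: with args.max_length = 4, a batch {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    # is concatenated to [1..8] (total_length 8) and re-split into [[1, 2, 3, 4], [5, 6, 7, 8]].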
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
        print(F'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
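# A minimal read-back sketch (an assumption for illustration, not part of this
# script); the feature names match those serialized above:
#   feature_spec = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))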
| 346 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize , sigma , theta , lambd , psi , gamma ) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and frequency parameters."""
    # force an odd kernel size so the filter has a well-defined center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
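# The loop above evaluates the real Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel offsets (px, py) rotated by theta degrees.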
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 91 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric ):
    '''Metric that scores texts by their perplexity under a causal language model.'''
    def _info( self ):
        """Return the metric's metadata and feature schema."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        """Compute per-text and mean perplexity of ``input_texts`` under ``model_id``."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = """cuda"""
        else:
            device = """cuda""" if torch.cuda.is_available() else """cpu"""
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="""pt""" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["""input_ids"""]
        attn_masks = encodings["""attention_mask"""]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="""none""" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 346 | 0 |
from ..utils import DummyObject, requires_backends
# Dummy placeholders that raise a helpful error when the `flax` backend is missing.
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
class a__ ( metaclass=DummyObject ):
    _backends = ["flax"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
| 92 |
'''simple docstring'''
def solution(limit: int = 1000000 ):
    '''Count reduced proper fractions with denominator <= limit: sum of Euler's totient phi(d) for d in [2, limit].'''
    phi = [i - 1 for i in range(limit + 1 )] # phi(p) = p - 1 when p is prime
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1: # i is prime; sieve its multiples
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
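# Illustrative check: for limit = 8 the totient sum is
# phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21 reduced proper fractions.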
| 346 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
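# Design note: this follows the standard transformers/datasets lazy-import pattern.
# At import time only the cheap `_import_structure` dict exists; the heavy
# torch-backed classes are materialized by _LazyModule on first attribute access.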
| 93 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline(Pipeline ):
    '''Pipeline that assigns class labels to images.'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """Route the optional ``top_k`` argument to the postprocessing step."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """Classify the image(s) passed as inputs."""
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        """Load the image and convert it to model-ready tensors."""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """Run the model forward pass."""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """Turn logits into a list of ``{"score", "label"}`` dicts for the ``top_k`` classes."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 346 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256( self ):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-2
    def test_dit_512( self ):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''']
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                f'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 94 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20 ):
    '''Count lattice paths through an n x n grid: the central binomial coefficient C(2n, n).'''
    n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
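# Worked example: for n = 2 the answer is C(2n, n) = 4! / (2! * 2!) = 6 lattice
# paths through a 2x2 grid.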
| 346 | 0 |
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 95 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
    def get_tokenizer( self , **kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case( self ):
        pass
    def test_add_special_tokens( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_2 = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_2 )
                tokens_2 = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_2 ) , 0 )
                text_2 = tokenizer.decode(ids )
                self.assertIsInstance(text_2 , str )
                self.assertEqual(text_2.replace(""" """ , """""" ) , output_text )
    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input( self ):
        pass
    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs( self ):
        pass
| 346 | 0 |
"""simple docstring"""
def remove_digit(num ):
    """Return the biggest number obtainable by removing exactly one digit from ``num``."""
    if not isinstance(num , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_string = str(abs(num ) )
        # one copy of the digit list per removable position
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(transposition ) ) for transposition in num_transpositions )
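# Illustrative check: remove_digit(152) == 52 and remove_digit(6385) == 685.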
if __name__ == "__main__":
__import__("""doctest""").testmod() | 96 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
    '''Abstract base class for constraints that can be enforced during generation.'''
    def __init__( self ):
        # run a quick self-check on construction
        self.test()
    def test( self ):
        """Sanity-check the constraint: advancing with its own suggestion must make progress."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_00_00:
                raise Exception("""update() does not fulfill the constraint.""" )
        if self.remaining() != 0:
            raise Exception("""Custom Constraint is not defined correctly.""" )
    @abstractmethod
    def advance( self ):
        """Return the token(s) that would advance this constraint by one step."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def does_advance( self , token_id: int ):
        """Return whether ``token_id`` makes progress on this constraint."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def update( self , token_id: int ):
        """Advance the constraint with ``token_id``; return (stepped, completed, reset)."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def reset( self ):
        """Reset the internal progress of this constraint."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def remaining( self ):
        """Return how many steps remain until the constraint is fulfilled."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def copy( self , stateful=False ):
        """Return a copy of this constraint, optionally carrying over its progress."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint ):
    '''Force a specific ordered sequence of token ids to appear in the output.'''
    def __init__( self , token_ids: List[int] ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance( self , token_id: int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update( self , token_id: int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
                self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy( self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
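# Illustrative sketch: c = PhrasalConstraint([5, 6, 7]); c.advance() -> 5;
# c.update(5) -> (True, False, False); after c.update(6) and c.update(7),
# c.completed is True and c.remaining() == 0.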
class DisjunctiveTrie:
    '''A trie over several candidate token-id sequences, used by DisjunctiveConstraint.'''
    def __init__( self , nested_token_ids: List[List[int]] , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                f''' {nested_token_ids}.''' )
        self.trie = root
    def next_tokens( self , current_seq ):
        """Return the tokens that can follow ``current_seq`` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ):
        """A sequence is a subset of another iff the trie has fewer leaves than sequences."""
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
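# Illustrative sketch: DisjunctiveTrie([[1, 2, 3], [1, 2, 4]]) builds the trie
# {1: {2: {3: {}, 4: {}}}}; next_tokens([1, 2]) returns [3, 4] and
# reached_leaf([1, 2, 3]) is True.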
class DisjunctiveConstraint(Constraint ):
    '''Force one of several candidate token-id sequences to appear in the output.'''
    def __init__( self , nested_token_ids: List[List[int]] ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance( self , token_id: int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update( self , token_id: int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.current_seq = []
    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy( self , stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    '''Track the fulfillment state of a list of constraints for one beam hypothesis.'''
    def __init__( self , constraints: List[Constraint] ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ):
        """Collect every token that would advance some pending or in-progress constraint."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids: Optional[List[int]] ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id: int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            """`constraint.update(token_id)` is not yielding incremental progress, """
                            """even though `constraint.does_advance(token_id)` is true.""" )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # throughout this process, so they stay at their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 346 | 0 |
'''simple docstring'''
def merge_sort(collection ):
    '''Sort a list by repeatedly moving its minimum to the front and its maximum to the back.'''
    start , end = [], []
    while len(collection ) > 1:
        min_one , max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''') | 97 |
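# Illustrative trace for [5, 1, 4, 2, 3]: successive passes peel off (min, max)
# pairs, giving start = [1, 2], leftover [3], end = [4, 5] -> [1, 2, 3, 4, 5].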
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase ):
    '''Run the doctests embedded in the transformers source and docs.'''
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , ignore_files: Union[List[str], None] = None , n_identifier: Union[str, List[str], None] = None , only_modules: bool = True , ):
        """Run doctests on every file in ``directory`` that matches the identifier filters."""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files( self ):
        transformers_directory = Path("""src/transformers""" )
        modeling_files = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(transformers_directory , identifier=modeling_files , ignore_files=ignore_files )
    def test_tokenization_files( self ):
        transformers_directory = Path("""src/transformers""" )
        tokenization_files = """tokenization"""
        self.analyze_directory(transformers_directory , identifier=tokenization_files )
    def test_configuration_files( self ):
        transformers_directory = Path("""src/transformers""" )
        configuration_files = """configuration"""
        self.analyze_directory(transformers_directory , identifier=configuration_files )
    def test_remaining_files( self ):
        transformers_directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(transformers_directory , n_identifier=n_identifiers )
    def test_doc_files( self ):
        doc_source_directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(doc_source_directory , ignore_files=ignore_files , only_modules=False )
| 346 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text )
        print(tokens )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
| 98 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _UpperCamelCase ( ):
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _UpperCamelCase ( ):
'''simple docstring'''
assert _test_patching.open is open
UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ):
pass
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , None ) is None
with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__"""
UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _UpperCamelCase ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase__ = """__test_patch_submodule_successive_join__"""
UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__"""
UpperCAmelCase__ = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
pass
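# The tests above exercise patch_submodule from datasets.utils.patching. As a rough
# mental model only, the core swap-and-restore behaviour can be sketched as below;
# the real implementation additionally resolves dotted paths ("os.path.join") and
# wraps intermediate modules in _PatchedModuleObj, which this sketch omits.
from contextlib import contextmanager

_MISSING = object()

@contextmanager
def simple_attribute_patch(module, attribute: str, mock):
    # Remember the original value (if any), install the mock, then restore on exit.
    original = getattr(module, attribute, _MISSING)
    setattr(module, attribute, mock)
    try:
        yield
    finally:
        if original is _MISSING:
            delattr(module, attribute)
        else:
            setattr(module, attribute, original)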
| 346 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def A_ ( ) -> Any:
a__ : Tuple = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
a__ : Dict = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(A__ )
# Let's go
a__ : str = parser.parse_args()
if not hasattr(A__ , 'func' ):
parser.print_help()
exit(1 )
# Run
a__ : Optional[Any] = args.func(A__ )
service.run()
if __name__ == "__main__":
main()
| 99 |
'''simple docstring'''
from timeit import timeit
UpperCAmelCase_ = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
    # We only need to traverse half of the string, because the i-th
    # character from the end can be read via index n - i - 1.
    # e.g. [0,1,2,3,4,5] => index 4 pairs with index 1 (i == n - i - 1),
    # where n is the length of the string
return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
    # base case: the empty string and single characters are palindromes
    if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return True
if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return s == s[::-1]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())'''
UpperCAmelCase__ = F'''from __main__ import test_data, {name}'''
UpperCAmelCase__ = 500000
UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
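# The benchmark helper above drives timeit with stmt/setup strings; the same
# pattern in isolation looks like this (the timed snippet is only an illustration):
from timeit import timeit

elapsed = timeit(stmt="sorted(data)", setup="data = list(range(1000))", number=10_000)
print(f"sorted(): finished 10,000 runs in {elapsed:.5f} seconds")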
| 346 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = BlipImageProcessor()
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""")
__SCREAMING_SNAKE_CASE = BlipProcessor(lowerCAmelCase__ , lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
def snake_case_ ( self , **lowerCAmelCase__):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__).tokenizer
def snake_case_ ( self , **lowerCAmelCase__):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__).image_processor
def snake_case_ ( self):
shutil.rmtree(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1)) for x in image_inputs]
return image_inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0)
__SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors="""np""")
__SCREAMING_SNAKE_CASE = processor(images=lowerCAmelCase__ , return_tensors="""np""")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """input_ids""", """attention_mask"""])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """input_ids""", """attention_mask"""])
| 100 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account.\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
UpperCAmelCase__ = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
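# For orientation, the brevity penalty folded into the returned score can be
# written out directly. This is a sketch of the standard formula from Papineni
# et al. (2002); compute_bleu handles it internally, so this helper is illustrative.
import math

def brevity_penalty(translation_length: int, reference_length: int) -> float:
    # BP = 1 if c > r, else exp(1 - r / c), with c the candidate length and
    # r the effective reference length.
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / max(translation_length, 1))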
| 346 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ :List[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Tuple = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Any = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowercase__ :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
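# _LazyModule defers the heavy framework imports above until an attribute is first
# accessed. A simplified sketch of that idea follows; the real implementation also
# handles __dir__, direct submodule access, and import-error reporting.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [attr, ...]} into a per-attribute lookup table.
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(submodule, attr)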
| 101 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 346 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
SCREAMING_SNAKE_CASE : Union[str, Any] = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
SCREAMING_SNAKE_CASE : Optional[int] = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
SCREAMING_SNAKE_CASE : Optional[int] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[int]=False , _snake_case : int=False , _snake_case : Optional[Any]=True , _snake_case : Optional[int]=False , _snake_case : Optional[int]="dummy_doc" ) ->int:
"""simple docstring"""
__snake_case : Any = {doc: key_lines}
__snake_case : str = {doc: sys_lines}
__snake_case : Any = {}
__snake_case : Union[str, Any] = 0
__snake_case : Dict = 0
__snake_case : Optional[Any] = 0
__snake_case : List[Any] = 0
__snake_case : int = 0
__snake_case : Optional[Any] = 0
__snake_case , __snake_case : str = reader.get_doc_mentions(_snake_case , key_doc_lines[doc] , _snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
__snake_case : List[Any] = reader.set_annotated_parse_trees(_snake_case , key_doc_lines[doc] , _snake_case , _snake_case )
__snake_case , __snake_case : int = reader.get_doc_mentions(_snake_case , sys_doc_lines[doc] , _snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
__snake_case : str = reader.set_annotated_parse_trees(_snake_case , key_doc_lines[doc] , _snake_case , _snake_case )
if remove_nested:
__snake_case , __snake_case : Optional[int] = reader.remove_nested_coref_mentions(_snake_case , _snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__snake_case , __snake_case : int = reader.remove_nested_coref_mentions(_snake_case , _snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__snake_case : List[Any] = reader.get_mention_assignments(_snake_case , _snake_case )
__snake_case : Any = reader.get_mention_assignments(_snake_case , _snake_case )
__snake_case : str = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'''files, respectively''' )
return doc_coref_infos
def lowercase ( _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] ) ->Optional[int]:
"""simple docstring"""
__snake_case : str = get_coref_infos(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
__snake_case : Union[str, Any] = {}
__snake_case : Union[str, Any] = 0
__snake_case : Optional[int] = 0
for name, metric in metrics:
__snake_case , __snake_case , __snake_case : Tuple = evaluator.evaluate_documents(_snake_case , _snake_case , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
__snake_case : Optional[Any] = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( _snake_case : Optional[int] ) ->Tuple:
"""simple docstring"""
__snake_case : Any = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
__snake_case : Dict = line.split()[5]
if not parse_col == "-":
__snake_case : Optional[int] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=True , a_=False , a_=False , a_=False ):
'''simple docstring'''
__snake_case : Optional[int] = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
__snake_case : Optional[int] = util.check_gold_parse_annotation(a_ )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__snake_case : Tuple = evaluate(
key_lines=a_ , sys_lines=a_ , metrics=a_ , NP_only=a_ , remove_nested=a_ , keep_singletons=a_ , min_span=a_ , )
return score
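# evaluate_documents returns (recall, precision, f1) per metric above; the f1 value
# follows the usual F-beta definition (here with beta=1, matching the call). A
# sketch of that combination, for reference:
def f_beta(precision: float, recall: float, beta: float = 1.0) -> float:
    if precision + recall == 0.0:
        return 0.0
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)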
| 102 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = False
UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase )
UpperCAmelCase__ = TaConfig(
vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , )
UpperCAmelCase__ = nn.ModuleList()
for lyr_num in range(_UpperCAmelCase ):
UpperCAmelCase__ = TaBlock(_UpperCAmelCase )
self.encoders.append(_UpperCAmelCase )
UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase )
UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase )
UpperCAmelCase__ = encoder_input_tokens.shape[1]
UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(_UpperCAmelCase )
UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase )
        # invert the attention mask
UpperCAmelCase__ = encoder_input_tokens.size()
UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase )
for lyr in self.encoders:
UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0]
UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase )
return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
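# get_extended_attention_mask comes from ModuleUtilsMixin; as a sketch of its
# behaviour, a (batch, seq) keep/ignore mask becomes an additive bias that can be
# summed onto raw attention scores (transformers handles dtype details internally):
import torch

def extended_attention_mask_sketch(mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # (batch, seq) -> (batch, 1, 1, seq): 0.0 where attended, a large negative
    # value where masked.
    extended = mask[:, None, None, :].to(dtype)
    return (1.0 - extended) * torch.finfo(dtype).min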
| 346 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCamelCase( ):
lowerCAmelCase_ : Tuple = 9
lowerCAmelCase_ : Tuple = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCAmelCase_ : List[Any] = kruskal(__UpperCamelCase ,__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(__UpperCamelCase ) == sorted(__UpperCamelCase )
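# The imported kruskal consumes (node_count, [[u, v, weight], ...]) and returns the
# MST edge list compared above. A minimal union-find sketch of the same algorithm
# (the actual graphs.minimum_spanning_tree_kruskal may differ in details):
def kruskal_sketch(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    minimum_spanning_tree = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            minimum_spanning_tree.append([u, v, weight])
    return minimum_spanning_tree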
| 103 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCAmelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ = {}
with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file:
for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = line.strip()
if line:
UpperCAmelCase__ = line.split()
UpperCAmelCase__ = line_number
UpperCAmelCase__ = words[0]
UpperCAmelCase__ = value
return result
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split(""".""" ):
UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase__ = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase__ = value[0]
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase__ = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = """.""".join([key, hf_param_name] )
else:
UpperCAmelCase__ = key
UpperCAmelCase__ = value if """lm_head""" in full_key else value[0]
UpperCAmelCase_ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
'''simple docstring'''
UpperCAmelCase__ = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2]
UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
UpperCAmelCase__ = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase__ = """weight_v"""
elif "bias" in name:
UpperCAmelCase__ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = """weight"""
else:
UpperCAmelCase__ = None
if hf_dict is not None:
rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return is_used
return is_used
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase__ = True
else:
UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase__ = name.split(""".""" )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase__ = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
elif is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned or is_seq_class:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" )
UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
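# Illustration of the "*" substitution performed in load_wavaveca_layer when a
# MAPPING entry contains a layer wildcard; the names below are example inputs,
# not taken from a real checkpoint.
def _demo_wildcard_substitution() -> str:
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    mapped_key = "wav2vec2.encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "wav2vec2.encoder.layers.3.attention.k_proj"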
| 346 | 0 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
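# A few illustrative checks against the pattern above, using the call name from the
# __main__ block; per the regex, the first two examples match and the last two do not:
for example in ('0712345678', '+94702343221', '0112345678', 'abc'):
    print(example, is_sri_lankan_phone_number(example))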
| 104 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores of the code candidates."""

        if os.getenv("HF_ALLOW_CODE_EVAL", "0") != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    # `check_correctness` runs one candidate program against its test
                    # in a sandboxed subprocess; it comes from this metric's `execute`
                    # module, imported at the top of the file.
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
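# A minimal sanity check of the unbiased estimator above (pass@k = 1 - C(n-c, k) / C(n, k)),
# reproducing the docstring example: one problem, n = 2 candidates, c = 1 passing.
# Assumes `np` is imported at the top of this module, as the metric itself requires.
if __name__ == "__main__":
    total = np.array([2])
    correct = np.array([1])
    for k in (1, 2):
        print(f"pass@{k} =", estimate_pass_at_k(total, correct, k).mean())  # 0.5, then 1.0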
| 346 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Build a default scheduler only when none was passed in, so that
        # `test_switch` can exercise a scheduler created via `from_config`.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
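# A standalone sketch of the denoising loop the tests above exercise. The zero
# residual is a placeholder for a real UNet's noise prediction; shapes are illustrative.
if __name__ == "__main__":
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(25)

    sample = torch.randn(1, 3, 32, 32)  # stand-in for an initial noisy latent
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 32, 32])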
| 105 |
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime after `value * factor` (searching downward if desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
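# Quick usage examples for the helpers above. A prime input advances to the
# following prime, and `desc=True` searches downward instead of upward.
if __name__ == "__main__":
    print(is_prime(29))               # True
    print(next_prime(10))             # 11
    print(next_prime(11))             # 13
    print(next_prime(14, desc=True))  # 13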
| 346 | 0 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by repeated forward propagation and return the output."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 106 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)
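# A small worked example of the pipeline above: term frequency from one document,
# (df, n) from a three-document corpus, then idf and the combined score.
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # 1
    df, n = document_frequency("cat", corpus)  # (2, 3)
    idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))                     # 0.176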
| 346 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree over `arr` combining values with the binary function `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element `p` to `v` and rebuild its ancestors in O(log N)."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the values on the inclusive range [l, r] in O(log N)."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
__lowerCAmelCase : Optional[Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__lowerCAmelCase : Dict = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__lowerCAmelCase : List[str] = SegmentTree(test_array, min)
__lowerCAmelCase : Union[str, Any] = SegmentTree(test_array, max)
__lowerCAmelCase : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def __magic_name__ ( ):
'''simple docstring'''
for i in range(len(A ) ):
for j in range(A, len(A ) ):
a = reduce(A, test_array[i : j + 1] )
a = reduce(A, test_array[i : j + 1] )
a = reduce(lambda A, A : a + b, test_array[i : j + 1] )
assert min_range == min_segment_tree.query(A, A )
assert max_range == max_segment_tree.query(A, A )
assert sum_range == sum_segment_tree.query(A, A )
test_all_segments()
for index, value in test_updates.items():
__lowerCAmelCase : List[Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 107 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase_ = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase_ = model.state_dict()
UpperCAmelCase_ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase_ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"]
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
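    # A hedged follow-up sketch (not part of the original script): the extracted state
    # dict uses DistilBERT parameter names, so it can seed a student model. `n_layers=6`
    # matches the six teacher layers selected above; `strict=False` tolerates the
    # optional vocab_transform/vocab_layer_norm keys when --vocab_transform is unset.
    #
    # from transformers import DistilBertConfig, DistilBertForMaskedLM
    # student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    # student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)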
| 346 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
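# A minimal usage sketch against the hub checkpoint exercised above
# (network access is assumed; the sentence is an arbitrary example).
if __name__ == "__main__":
    tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096", add_prefix_space=True)
    enc = tok("Encode this sequence.", return_offsets_mapping=True)
    print(enc["input_ids"], enc["offset_mapping"])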
| 108 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
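# A standalone sketch of the two-phase PNDM loop exercised above: Runge-Kutta
# warm-up steps via step_prk, then linear multistep via step_plms. The zero
# residual stands in for a real model's noise prediction.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)

    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.prk_timesteps:
        sample = scheduler.step_prk(torch.zeros_like(sample), t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        sample = scheduler.step_plms(torch.zeros_like(sample), t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 32, 32])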
| 346 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
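# For reference: in the integration test above the mask is all ones with a block of
# zeros where new content (the hat) is generated, i.e. this pipeline version treats
# 0 as the region to repaint. A small helper mirroring that setup (`np` is imported
# at the top of this module):
def make_inpaint_mask(height=768, width=768):
    mask = np.ones((height, width), dtype=np.float32)
    mask[:250, 250:-250] = 0  # region to be repainted, matching the test
    return mask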
| 109 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
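# A quick usage sketch of the configuration defined above; the overridden values
# (16 frames, 6 layers) are illustrative, everything else keeps its default.
if __name__ == "__main__":
    config = VivitConfig(num_frames=16, num_hidden_layers=6)
    print(config.hidden_size, config.tubelet_size)  # 768 [2, 16, 16]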
| 346 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation ROUGE2 score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
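# Editor-added usage sketch (the argument values are assumptions about the surrounding
# training script, not from the source):
#   checkpoint_cb = get_checkpoint_callback(output_dir, metric="rouge2")
#   early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])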
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 346 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
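# Editor-added usage note (a sketch; the exact CLI entry point can vary by version):
#   transformers-cli env
# prints the same environment report that EnvironmentCommand.run() builds above,
# ready to paste into a GitHub issue.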
| 251 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
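# Editor-added usage sketch (the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP
# above; the Chinese sample text is an assumption for illustration only):
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("你好 世界")
#   tokenizer.decode(ids)  # _decode above strips spaces and restores " "/"\n" markers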
| 346 | 0 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
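# Editor-added usage sketch (the script and file names are assumptions):
# fire (see fire.Fire below) exposes calculate_rouge_path on the command line, e.g.
#   python rouge_cli.py predictions.txt references.txt --save_path rouge_metrics.json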
if __name__ == "__main__":
fire.Fire(calculate_rouge_path) | 269 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
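# Editor-added illustration of group_texts (a toy example, not part of the script):
# with args.max_length = 4, the batch {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
# is concatenated to [1, 2, 3, 4, 5, 6, 7, 8] and re-chunked into
# [[1, 2, 3, 4], [5, 6, 7, 8]]; any remainder shorter than max_length is dropped.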
| 346 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 337 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'

_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 346 | 0 |
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
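# Editor's note: swap_nodes exchanges only the payloads of the two matching nodes
# in O(n) time; the node objects and the links between them stay untouched, which
# is why no pointer rewiring is needed above.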
| 214 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Returns the number of reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit (Project Euler 72)."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
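# Editor-added sanity check: for limit = 8, phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# so solution(8) == 21 reduced proper fractions.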
| 346 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
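if __name__ == "__main__":
    # Editor-added demo (a minimal sketch, not part of the original module):
    # embed a small batch of integer timesteps and confirm the (batch, dim) shape.
    demo_timesteps = jnp.arange(4)
    demo_emb = get_sinusoidal_embeddings(demo_timesteps, embedding_dim=32)
    assert demo_emb.shape == (4, 32)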
| 9 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
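# Editor-added usage sketch (the checkpoint name and image path are assumptions):
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("cat.png")  # -> [{"score": ..., "label": ...}, ...] via postprocess above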
| 346 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 177 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))
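# Editor-added sanity check: solution(20) == C(40, 20) = 137846528820, the number
# of lattice paths through a 20x20 grid (Project Euler 15).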
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 346 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()

results = {}
# fmt: off
__a: Any = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__a: Any = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__a: int = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__a: int = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__a: int = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__a: Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__a: List[str] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__a: int = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__a: Dict = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__a: str = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__a: Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__a: Optional[Any] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__a: int = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__a: List[Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__a: Optional[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 198 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ) , 0 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
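# Added usage sketch (assumptions: run outside the unittest harness; ids follow the
# character vocab built in setUp above, where [GO]=0, [s]=1, digits=2..11, a-z=12..37).
if __name__ == "__main__":
    import json, os, tempfile
    chars = ["[GO]", "[s]"] + list("0123456789") + list("abcdefghijklmnopqrstuvwxyz")
    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_path, "w", encoding="utf-8") as fp:
            json.dump({c: i for i, c in enumerate(chars)}, fp)
        tok = MgpstrTokenizer(vocab_file=vocab_path)
        print(tok("tester")["input_ids"])  # expected per-character ids: [31, 16, 30, 31, 16, 29]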
| 346 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str =logging.get_logger(__name__)
lowerCamelCase : int ='''▁'''
lowerCamelCase : int ={'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase : Optional[int] ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCamelCase : Any ={
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
lowerCamelCase : Optional[Any] =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __a ( lowerCamelCase_ ):
_lowerCAmelCase : Any = VOCAB_FILES_NAMES
_lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
_lowerCAmelCase : List[int] = []
_lowerCAmelCase : List[int] = []
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int="<s>" , SCREAMING_SNAKE_CASE : Any="</s>" , SCREAMING_SNAKE_CASE : Optional[int]="</s>" , SCREAMING_SNAKE_CASE : List[str]="<s>" , SCREAMING_SNAKE_CASE : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE : int="<pad>" , SCREAMING_SNAKE_CASE : Dict="<mask>" , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , SCREAMING_SNAKE_CASE : int=None , **SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
UpperCamelCase__ : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCamelCase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCamelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
UpperCamelCase__ : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCamelCase__ : Tuple = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ : List[str] = 1
UpperCamelCase__ : List[Any] = len(self.sp_model )
UpperCamelCase__ : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
UpperCamelCase__ : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ : Dict = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ : str = src_lang if src_lang is not None else "en_XX"
UpperCamelCase__ : str = self.lang_code_to_id[self._src_lang]
UpperCamelCase__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
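# Added note (not in the original): unlike BERT-style tokenizers, mBART attaches the
# language code as a *suffix*, so encoding a source sentence yields
# [token ids..., eos, src_lang_code] (see set_src_lang_special_tokens below).
# For example, with src_lang="en_XX" a sequence ends in [2, 250004] in the released
# mbart-large checkpoints (250004 being the en_XX code id there).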
def __getstate__( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.__dict__.copy()
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase__ : Tuple = {}
UpperCamelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
UpperCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
UpperCamelCase__ : Tuple = [1] * len(self.prefix_tokens )
UpperCamelCase__ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase__ : int = [self.sep_token_id]
UpperCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] , SCREAMING_SNAKE_CASE : Optional[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase__ : Union[str, Any] = src_lang
UpperCamelCase__ : Any = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase__ : Dict = self.convert_tokens_to_ids(_UpperCAmelCase )
UpperCamelCase__ : Dict = tgt_lang_id
return inputs
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : Any = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase , " " ).strip()
return out_string
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ : Tuple = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , "wb" ) as fi:
UpperCamelCase__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str = "en_XX" , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : str = "ro_RO" , **SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
UpperCamelCase__ : Dict = src_lang
UpperCamelCase__ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def __lowercase ( self : int ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self : int ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.lang_code_to_id[src_lang]
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
UpperCamelCase__ : str = self.lang_code_to_id[lang]
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : List[Any] = [self.eos_token_id, self.cur_lang_code]
| 189 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
"""simple docstring"""
self.test()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = False
while not completed:
if counter == 1:
self.reset()
UpperCAmelCase__ = self.advance()
if not self.does_advance(_UpperCAmelCase ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ):
"""simple docstring"""
super(_UpperCAmelCase , self ).__init__()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCAmelCase__ = token_ids
UpperCAmelCase__ = len(self.token_ids )
UpperCAmelCase__ = -1 # the index of the currently fulfilled step
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
if self.does_advance(_UpperCAmelCase ):
self.fulfilled_idx += 1
UpperCAmelCase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCAmelCase__ = True
UpperCAmelCase__ = completed
else:
# failed to make progress.
UpperCAmelCase__ = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = False
UpperCAmelCase__ = 0
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ):
"""simple docstring"""
UpperCAmelCase__ = PhrasalConstraint(self.token_ids )
if stateful:
UpperCAmelCase__ = self.seqlen
UpperCAmelCase__ = self.fulfilled_idx
UpperCAmelCase__ = self.completed
return new_constraint
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ):
"""simple docstring"""
UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] )
UpperCAmelCase__ = {}
for token_ids in nested_token_ids:
UpperCAmelCase__ = root
for tidx, token_id in enumerate(_UpperCAmelCase ):
if token_id not in level:
UpperCAmelCase__ = {}
UpperCAmelCase__ = level[token_id]
if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f''' {nested_token_ids}.''' )
UpperCAmelCase__ = root
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = self.trie
for current_token in current_seq:
UpperCAmelCase__ = start[current_token]
UpperCAmelCase__ = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase )
return len(_UpperCAmelCase ) == 0
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = list(root.values() )
if len(_UpperCAmelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase )
return len(_UpperCAmelCase ) != leaf_count
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ):
"""simple docstring"""
super(_UpperCAmelCase , self ).__init__()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase )
UpperCAmelCase__ = nested_token_ids
UpperCAmelCase__ = self.trie.max_height
UpperCAmelCase__ = []
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.trie.next_tokens(self.current_seq )
if len(_UpperCAmelCase ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
UpperCAmelCase__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
if self.does_advance(_UpperCAmelCase ):
self.current_seq.append(_UpperCAmelCase )
UpperCAmelCase__ = True
else:
UpperCAmelCase__ = True
self.reset()
UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq )
UpperCAmelCase__ = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = False
UpperCAmelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ):
"""simple docstring"""
UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCAmelCase__ = self.seqlen
UpperCAmelCase__ = self.current_seq
UpperCAmelCase__ = self.completed
return new_constraint
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ):
"""simple docstring"""
UpperCAmelCase__ = constraints
# max # of steps required to fulfill a given constraint
UpperCAmelCase__ = max([c.seqlen for c in constraints] )
UpperCAmelCase__ = len(_UpperCAmelCase )
UpperCAmelCase__ = False
self.init_state()
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = None
UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCAmelCase__ = constraint.advance()
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.append(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.extend(_UpperCAmelCase )
else:
UpperCAmelCase__ = self.inprogress_constraint.advance()
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.append(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.extend(_UpperCAmelCase )
if len(_UpperCAmelCase ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCAmelCase__ , UpperCAmelCase__ = False, False
if self.completed:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) )
UpperCAmelCase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCAmelCase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCAmelCase__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
# constraints in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_UpperCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(_UpperCAmelCase )
UpperCAmelCase__ = None
if not complete and stepped:
UpperCAmelCase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCAmelCase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCAmelCase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ):
"""simple docstring"""
UpperCAmelCase__ = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints objects
# throughout this process, so the copy starts from the initialization state.
if stateful:
UpperCAmelCase__ = [
constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase )
UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
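# Added driver sketch (guarded; token ids are made up for illustration, and the class
# names follow the originals referenced inside the copy() methods above).
if __name__ == "__main__":
    phrase = PhrasalConstraint([5, 9, 3])             # must emit 5, 9, 3 in order
    either = DisjunctiveConstraint([[7, 1], [7, 2]])  # must emit 7,1 or 7,2
    state = ConstraintListState([phrase, either])
    for token in [5, 9, 3, 7, 2]:
        complete, stepped = state.add(token)
    assert state.completed  # both constraints fulfilled after the five tokens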
| 346 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a__ ( lowerCamelCase_ ):
_a : jnp.ndarray
@flax_register_to_config
class a__ ( nn.Module , lowerCamelCase_ , lowerCamelCase_ ):
_a : int = 3_2
_a : int = 4
_a : int = 4
_a : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_a : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_a : Union[bool, Tuple[bool]] = False
_a : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_a : int = 2
_a : Union[int, Tuple[int]] = 8
_a : Optional[Union[int, Tuple[int]]] = None
_a : int = 1_2_8_0
_a : float = 0.0
_a : bool = False
_a : jnp.dtype = jnp.floataa
_a : bool = True
_a : int = 0
_a : bool = False
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowerCAmelCase = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa )
__lowerCAmelCase = jnp.ones((1,) , dtype=jnp.intaa )
__lowerCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowerCAmelCase , __lowerCAmelCase = jax.random.split(_UpperCAmelCase )
__lowerCAmelCase = {"params": params_rng, "dropout": dropout_rng}
return self.init(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )["params"]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.block_out_channels
__lowerCAmelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowerCAmelCase = self.num_attention_heads or self.attention_head_dim
# input
__lowerCAmelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowerCAmelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowerCAmelCase = FlaxTimestepEmbedding(_UpperCAmelCase , dtype=self.dtype )
__lowerCAmelCase = self.only_cross_attention
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowerCAmelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowerCAmelCase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowerCAmelCase = []
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(_UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowerCAmelCase = FlaxCrossAttnDownBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowerCAmelCase = FlaxDownBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_UpperCAmelCase )
__lowerCAmelCase = down_blocks
# mid
__lowerCAmelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__lowerCAmelCase = []
__lowerCAmelCase = list(reversed(_UpperCAmelCase ) )
__lowerCAmelCase = list(reversed(_UpperCAmelCase ) )
__lowerCAmelCase = list(reversed(_UpperCAmelCase ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = reversed_block_out_channels[min(i + 1 , len(_UpperCAmelCase ) - 1 )]
__lowerCAmelCase = i == len(_UpperCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__lowerCAmelCase = FlaxCrossAttnUpBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , prev_output_channel=_UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowerCAmelCase = FlaxUpBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , prev_output_channel=_UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_UpperCAmelCase )
__lowerCAmelCase = output_channel
__lowerCAmelCase = up_blocks
# out
__lowerCAmelCase = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
__lowerCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A=None , _A=None , _A = True , _A = False , ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , jnp.ndarray ):
__lowerCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowerCAmelCase = timesteps.astype(dtype=jnp.floataa )
__lowerCAmelCase = jnp.expand_dims(_UpperCAmelCase , 0 )
__lowerCAmelCase = self.time_proj(_UpperCAmelCase )
__lowerCAmelCase = self.time_embedding(_UpperCAmelCase )
# 2. pre-process
__lowerCAmelCase = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) )
__lowerCAmelCase = self.conv_in(_UpperCAmelCase )
# 3. down
__lowerCAmelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowerCAmelCase , __lowerCAmelCase = down_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
else:
__lowerCAmelCase , __lowerCAmelCase = down_block(_UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__lowerCAmelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
_UpperCAmelCase , _UpperCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__lowerCAmelCase = new_down_block_res_samples
# 4. mid
__lowerCAmelCase = self.mid_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__lowerCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
__lowerCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowerCAmelCase = up_block(
_UpperCAmelCase , temb=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , deterministic=not train , )
else:
__lowerCAmelCase = up_block(_UpperCAmelCase , temb=_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , deterministic=not train )
# 6. post-process
__lowerCAmelCase = self.conv_norm_out(_UpperCAmelCase )
__lowerCAmelCase = nn.silu(_UpperCAmelCase )
__lowerCAmelCase = self.conv_out(_UpperCAmelCase )
__lowerCAmelCase = jnp.transpose(_UpperCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_UpperCAmelCase )
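# Added standalone sketch (guarded; not part of the model) of the timestep
# normalization at the top of __call__: scalars and 0-d arrays are promoted to a
# batched 1-d array before the sinusoidal time projection.
if __name__ == "__main__":
    import jax.numpy as jnp

    def _normalize_timesteps(timesteps):
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif len(timesteps.shape) == 0:
            timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
        return timesteps

    assert _normalize_timesteps(10).shape == (1,)
    assert _normalize_timesteps(jnp.array(10)).shape == (1,)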
| 92 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase_ = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )]
if identifier is not None:
UpperCAmelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for n_ in n_identifier:
UpperCAmelCase__ = [file for file in files if n_ not in file]
else:
UpperCAmelCase__ = [file for file in files if n_identifier not in file]
UpperCAmelCase__ = ignore_files or []
ignore_files.append("""__init__.py""" )
UpperCAmelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , _UpperCAmelCase )
if only_modules:
UpperCAmelCase__ = file.split(""".""" )[0]
try:
UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase )
UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = Path("""src/transformers""" )
UpperCAmelCase__ = """modeling"""
UpperCAmelCase__ = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = Path("""src/transformers""" )
UpperCAmelCase__ = """tokenization"""
self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = Path("""src/transformers""" )
UpperCAmelCase__ = """configuration"""
self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = Path("""src/transformers""" )
UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = Path("""docs/source""" )
UpperCAmelCase__ = ["""favicon.ico"""]
self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
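# Added standalone sketch (not one of the tests above) of the machinery being driven:
# a ">>>" example in a docstring is collected and executed by doctest.
def _add(a, b):
    """Return a + b.

    >>> _add(2, 3)
    5
    """
    return a + b

if __name__ == "__main__":
    import doctest
    assert doctest.testmod().failed == 0  # runs the >>> example in _add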
| 346 | 0 |
def lowerCamelCase__ ( _A = 600851475143 ):
'''simple docstring'''
try:
snake_case_ = int(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
snake_case_ = 2
snake_case_ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
snake_case_ = i
while n % i == 0:
snake_case_ = n // i
i += 1
return int(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
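# Added sanity check (a sketch, not in the original): 13195 = 5 * 7 * 13 * 29, so its
# largest prime factor is 29; the Project Euler default input is known to yield 6857.
if __name__ == "__main__":
    assert solution(13195) == 29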
| 187 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _UpperCamelCase ( ):
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _UpperCamelCase ( ):
'''simple docstring'''
assert _test_patching.open is open
UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ):
pass
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None
with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__"""
UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _UpperCamelCase ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase__ = """__test_patch_submodule_successive_join__"""
UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__"""
UpperCAmelCase__ = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
pass
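# Added condensed sketch (not one of the tests above) of the core contract: the
# attribute is swapped only for the lifetime of the context manager.
if __name__ == "__main__":
    import os
    mock = "__sketch_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is os.path.join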
| 346 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=SCREAMING_SNAKE_CASE__ , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=SCREAMING_SNAKE_CASE__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=SCREAMING_SNAKE_CASE__ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__lowercase = parser.parse_args()
return args
def _A ( A__ ):
"""simple docstring"""
def fn(A__ ):
return tokenizer(examples['''text'''] )
return fn
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__lowercase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__lowercase = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
__lowercase = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
__lowercase = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def _A ( A__ ):
"""simple docstring"""
__lowercase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowercase = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
__lowercase = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F"Limiting the dataset to {args.limit} entries." )
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
__lowercase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowercase = tokenize_function(SCREAMING_SNAKE_CASE__ )
__lowercase = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A__ ):
# Concatenate all texts.
__lowercase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowercase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowercase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowercase = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__lowercase = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
__lowercase = 0
__lowercase = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
__lowercase = grouped_dataset[shard : shard + args.shard_size]
__lowercase = len(dataset_snapshot['''input_ids'''] )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE__ , F"dataset-{shard_count}-{records_containing}.tfrecord" )
__lowercase = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowercase = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print('''Wrote file {} containing {} records'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(F"Total {args.split} records: {total_records}" , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
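# Example invocation (added sketch; the script filename is hypothetical, the flags are
# the ones defined in parse_args above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 --split train \
#       --shard_size 1000 --max_length 512 --output_dir tf-tpu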
| 104 |
'''simple docstring'''
from timeit import timeit
UpperCAmelCase_ = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
# We only need to traverse the first half of the string,
# since the i'th-from-last character can be checked
# from the i'th index.
# e.g. for indices [0,1,2,3,4,5], index 4 is reached
# from index 1 via i == n - i - 1,
# where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) )
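# Worked example (added): for s = "rotor", n = 5, the generator compares indices
# (0, 4) and (1, 3); the middle index 2 never needs checking.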
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) <= 2:
return True
if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return s == s[::-1]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())'''
UpperCAmelCase__ = F'''from __main__ import test_data, {name}'''
UpperCAmelCase__ = 500000
UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 346 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase_ = " \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)  # PY37 is an assumption; the source was mangled
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
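# Illustration (hypothetical target class) of the convention these tests enforce:
# a "# Copied from" marker promises the block below it stays identical to the
# referenced source, modulo any declared Old->New renames, e.g.:
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#   class TestSchedulerOutput(BaseOutput):
#       ...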
| 251 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 346 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 269 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines."""
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 346 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
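# Minimal sketch of a concrete reader; `ExampleDatasetReader` is illustrative --
# the real subclasses live in datasets/io/ (csv.py, json.py, ...) and build a
# dataset from self.path_or_paths while honoring streaming/keep_in_memory.
class ExampleDatasetReader(AbstractDatasetReader):
    def read(self):
        raise NotImplementedError("sketch only; see datasets.io.json.JsonDatasetReader")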
| 337 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens: torch.Tensor, encoder_inputs_mask: torch.Tensor):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
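# Usage sketch; the hyperparameters below are illustrative assumptions, not a
# released configuration.
if __name__ == "__main__":
    notes_encoder = SpectrogramNotesEncoder(
        max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
        num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 1536, (1, 2048))
    mask = torch.ones(1, 2048, dtype=torch.bool)
    hidden_states, out_mask = notes_encoder(encoder_input_tokens=tokens, encoder_inputs_mask=mask)
    print(hidden_states.shape)  # torch.Size([1, 2048, 768])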
| 346 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
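    # k is a tuning knob (5 is the default above); an explicit-k example:
    print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3], k=3))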
| 214 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
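# Example invocation (the paths are placeholders, not real checkpoints):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h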
| 346 | 0 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
return True
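# The checkers above assume a singly linked list node exposing `val` and
# `next`; a minimal definition plus a smoke test (both illustrative):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
def build(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
print(is_palindrome_stack(build([1, 2, 2, 1])))  # True
print(is_palindrome_dict(build([1, 2, 3])))      # False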
| 9 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores of the predictions."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 346 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time by trial division over odd numbers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Find the next prime after factor * value (downward when kwargs['desc'] is True)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
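# Quick demonstration of the helpers above:
if __name__ == "__main__":
    print(is_prime(29))       # True
    print(next_prime(29))     # 31 -- 29 is itself prime, so the search restarts at 30
    print(next_prime(15, 2))  # 31 -- the first prime after 2 * 15 = 30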
| 346 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs) -> dict:
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 198 |
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return the logarithmically scaled inverse document frequency."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
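# Worked example (toy corpus; the numbers follow directly from the definitions above):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog sat\nthe bird flew"
    tf = term_frequency("sat", "the cat sat")  # 1
    df, n = document_frequency("sat", corpus)  # (2, 3)
    idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf, idf))                     # 0.176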
| 346 | 0 |
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 189 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase_ = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase_ = model.state_dict()
UpperCAmelCase_ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase_ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"]
UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
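# A quick sketch (not in the source) of the teacher-to-student layer mapping the loop
# above implements: teacher layers 0, 2, 4, 7, 9 and 11 of the 12-layer BERT teacher
# seed consecutive student layers 0..5 of a 6-layer DistilBERT.
for student_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    print(f"teacher layer {teacher_idx} -> student layer {student_idx}")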
| 346 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCamelCase__ = get_logger()
UpperCamelCase__ = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        """simple docstring"""
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                f"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def __SCREAMING_SNAKE_CASE( ):
"""simple docstring"""
import jax
        return {str(device ): device for device in jax.devices()}
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and column:
if all(
isinstance(_UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_UpperCAmelCase , axis=0 )
return column
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
return value
elif isinstance(_UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_UpperCAmelCase , PIL.Image.Image ):
__lowerCAmelCase = np.asarray(_UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__lowerCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_UpperCAmelCase , "__array__" ) and not isinstance(_UpperCAmelCase , jax.Array ):
__lowerCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _UpperCAmelCase , map_list=_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.numpy_arrow_extractor().extract_row(_UpperCAmelCase )
__lowerCAmelCase = self.python_features_decoder.decode_row(_UpperCAmelCase )
return self.recursive_tensorize(_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.numpy_arrow_extractor().extract_column(_UpperCAmelCase )
__lowerCAmelCase = self.python_features_decoder.decode_column(_UpperCAmelCase , pa_table.column_names[0] )
__lowerCAmelCase = self.recursive_tensorize(_UpperCAmelCase )
__lowerCAmelCase = self._consolidate(_UpperCAmelCase )
return column
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(_UpperCAmelCase )
__lowerCAmelCase = self.python_features_decoder.decode_batch(_UpperCAmelCase )
__lowerCAmelCase = self.recursive_tensorize(_UpperCAmelCase )
for column_name in batch:
__lowerCAmelCase = self._consolidate(batch[column_name] )
return batch
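# A standalone sketch (assumed, not part of the formatter) of the dtype rule _tensorize
# applies above: integer arrays follow jax's x64 flag, floats default to float32.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp
    import numpy as np

    int_dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    print(jnp.array(np.arange(3), dtype=int_dtype).dtype)   # int32 under the default config
    print(jnp.array(np.ones(3), dtype=jnp.float32).dtype)   # float32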
| 92 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCAmelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
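# A minimal denoising-loop sketch using PNDMScheduler directly; the random "model
# output" is a stand-in assumption for a real UNet prediction.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for unet(sample, t).sample
        sample = scheduler.step(model_output, t, sample).prev_sample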
| 346 | 0 |
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence ( x: str ) -> str:
    '''simple docstring'''
    x = re.sub("<n>" , "" , x ) # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 187 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """vivit"""
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
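# Illustrative only: with the defaults above, the tubelet embedding yields
# (32 / 2) * (224 / 16) * (224 / 16) = 3136 video patches per clip.
if __name__ == "__main__":
    config = VivitConfig()
    num_patches = (config.num_frames // config.tubelet_size[0]) * (
        config.image_size // config.tubelet_size[1]
    ) * (config.image_size // config.tubelet_size[2])
    print(num_patches)  # 3136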
| 346 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , SCREAMING_SNAKE_CASE__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , SCREAMING_SNAKE_CASE__ ) is None
with patch_submodule(_test_patching , '''len''' , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE__ ):
pass
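# A simplified, self-contained sketch of the attribute-patching idea these tests
# exercise; the real patch_submodule in datasets.utils.patching is more involved
# (it also wraps intermediate submodules in _PatchedModuleObj).
import contextlib


@contextlib.contextmanager
def simple_patch(module, attr, replacement):
    sentinel = object()
    original = getattr(module, attr, sentinel)  # remember the original attribute, if any
    setattr(module, attr, replacement)
    try:
        yield
    finally:
        # restore the module exactly as it was before patching
        if original is sentinel:
            delattr(module, attr)
        else:
            setattr(module, attr, original)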
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 346 | 0 |
'''simple docstring'''
def bubble_sort ( list_data: list , length: int = 0 ):
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
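# Quick illustration. Because this bubble sort recurses once per pass, inputs much
# longer than Python's default recursion limit (about 1000 frames) can raise
# RecursionError in the worst case.
if __name__ == "__main__":
    print(bubble_sort([5, 2, 9, 1, 5, 6]))  # -> [1, 2, 5, 5, 6, 9]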
| 251 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
UpperCAmelCase__ = jieba
UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if self.remove_space:
UpperCAmelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ = inputs
UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase )
UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
UpperCAmelCase__ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase )
UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
UpperCAmelCase__ = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ = cur_pieces[1:]
else:
UpperCAmelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
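# A small sketch of the whitespace round-trip the tokenizer uses above: spaces and
# newlines are mapped to the block glyphs \u2582/\u2583 before SentencePiece sees the
# text, and mapped back in _decode. The sample string is an assumption.
if __name__ == "__main__":
    translator = str.maketrans(" \n", "\u2582\u2583")
    encoded = "你好 世界\n再见".translate(translator)
    decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
    assert decoded == "你好 世界\n再见"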
| 346 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase ( __snake_case ) -> str:
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" ,SCREAMING_SNAKE_CASE__ ,)
if isinstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ ,PIL.Image.Image ):
__lowerCAmelCase : str = [image]
if isinstance(image[0] ,PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = image[0].size
__lowerCAmelCase , __lowerCAmelCase : Tuple = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCAmelCase : str = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
__lowerCAmelCase : Any = np.concatenate(SCREAMING_SNAKE_CASE__ ,axis=0 )
__lowerCAmelCase : str = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 255.0
__lowerCAmelCase : List[str] = image.transpose(0 ,3 ,1 ,2 )
__lowerCAmelCase : List[Any] = 2.0 * image - 1.0
__lowerCAmelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] ,torch.Tensor ):
__lowerCAmelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE__ ,dim=0 )
return image
def _lowercase ( __snake_case ) -> Any:
if isinstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor ):
return mask
elif isinstance(SCREAMING_SNAKE_CASE__ ,PIL.Image.Image ):
__lowerCAmelCase : Optional[int] = [mask]
if isinstance(mask[0] ,PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Any = mask[0].size
__lowerCAmelCase , __lowerCAmelCase : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCAmelCase : Tuple = [np.array(m.convert("L" ).resize((w, h) ,resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
__lowerCAmelCase : Optional[Any] = np.concatenate(SCREAMING_SNAKE_CASE__ ,axis=0 )
__lowerCAmelCase : Any = mask.astype(np.floataa ) / 255.0
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(mask[0] ,torch.Tensor ):
__lowerCAmelCase : Any = torch.cat(SCREAMING_SNAKE_CASE__ ,dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline ):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__( self , unet: UNet2DModel , scheduler: RePaintScheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = image
__lowerCAmelCase : Union[str, Any] = _preprocess_image(_UpperCAmelCase)
__lowerCAmelCase : Union[str, Any] = original_image.to(device=self.device , dtype=self.unet.dtype)
__lowerCAmelCase : Dict = _preprocess_mask(_UpperCAmelCase)
__lowerCAmelCase : str = mask_image.to(device=self.device , dtype=self.unet.dtype)
__lowerCAmelCase : Optional[Any] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_UpperCAmelCase , _UpperCAmelCase) and len(_UpperCAmelCase) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_UpperCAmelCase)}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
__lowerCAmelCase : List[Any] = original_image.shape
__lowerCAmelCase : Union[str, Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.device)
__lowerCAmelCase : str = eta
__lowerCAmelCase : int = self.scheduler.timesteps[0] + 1
__lowerCAmelCase : str = generator[0] if isinstance(_UpperCAmelCase , _UpperCAmelCase) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
if t < t_last:
# predict the noise residual
__lowerCAmelCase : List[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase).sample
# compute previous image: x_t -> x_t-1
__lowerCAmelCase : Optional[int] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCAmelCase : str = self.scheduler.undo_step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__lowerCAmelCase : List[str] = t
__lowerCAmelCase : Any = (image / 2 + 0.5).clamp(0 , 1)
__lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__lowerCAmelCase : Optional[Any] = self.numpy_to_pil(_UpperCAmelCase)
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=image)
| 269 |
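# A hedged usage sketch for the RePaint pipeline defined in the preceding snippet; the
# checkpoint name and the input file paths are assumptions for illustration.
if __name__ == "__main__":
    import torch
    import PIL.Image
    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
    original = PIL.Image.open("face_256.png")  # assumed 256x256 input image
    mask = PIL.Image.open("mask_256.png")      # white = keep, black = inpaint
    generator = torch.Generator().manual_seed(0)
    out = pipe(image=original, mask_image=mask, num_inference_steps=250, generator=generator)
    out.images[0].save("inpainted.png")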
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ = logging.getLogger(__name__)
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
UpperCAmelCase__ = parser.parse_args()
return args
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return tokenizer(examples["""text"""] )
return fn
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
UpperCAmelCase__ = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(SCREAMING_SNAKE_CASE__ : int ):
# Concatenate all texts.
UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase__ = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] )
UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase__ = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
main(args)
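# A complementary sketch (not in the source) showing how the shards written above can
# be read back with tf.data; the feature spec mirrors the writer, and the shard
# filename is an assumed example of the writer's naming pattern.
def parse_example(serialized, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    return tf.io.parse_single_example(serialized, feature_spec)
# tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"]).map(parse_example)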
| 346 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->List[Any]:
"""simple docstring"""
lowercase : int = None
if token is not None:
lowercase : Dict = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
lowercase : Optional[int] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase : str = requests.get(SCREAMING_SNAKE_CASE__, headers=SCREAMING_SNAKE_CASE__ ).json()
lowercase : List[Any] = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowercase : Any = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = requests.get(url + f"""&page={i + 2}""", headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->int:
"""simple docstring"""
lowercase : Any = None
if token is not None:
lowercase : int = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
lowercase : Optional[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowercase : int = requests.get(SCREAMING_SNAKE_CASE__, headers=SCREAMING_SNAKE_CASE__ ).json()
lowercase : List[str] = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
lowercase : Tuple = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = requests.get(url + f"""&page={i + 2}""", headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Any:
"""simple docstring"""
lowercase : Union[str, Any] = None
if token is not None:
lowercase : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
lowercase : List[Any] = requests.get(SCREAMING_SNAKE_CASE__, headers=SCREAMING_SNAKE_CASE__, allow_redirects=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = result.headers['''Location''']
lowercase : List[Any] = requests.get(SCREAMING_SNAKE_CASE__, allow_redirects=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__, f"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__, '''wb''' ) as fp:
fp.write(response.content )
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->Dict:
"""simple docstring"""
lowercase : List[Any] = []
lowercase : Optional[Any] = []
lowercase : List[str] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowercase : Tuple = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase : List[str] = line[: line.index(''': ''' )]
lowercase : str = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
lowercase : Optional[Any] = line[len('''FAILED ''' ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowercase : List[str] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
f"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
''' problem.''' )
lowercase : Union[str, Any] = None
if job_name and job_links:
lowercase : Dict = job_links.get(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowercase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )]
return result
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->str:
"""simple docstring"""
lowercase : List[Any] = []
lowercase : Optional[int] = [os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__, job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->str:
"""simple docstring"""
lowercase : str = Counter()
counter.update([x[1] for x in logs] )
lowercase : Any = counter.most_common()
lowercase : int = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase : List[Any] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    lowercase : Tuple = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True ) )
return r
def __lowercase ( _UpperCamelCase ) ->Dict:
"""simple docstring"""
lowercase : int = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
lowercase : Optional[Any] = test.split('''/''' )[2]
else:
lowercase : Dict = None
return test
def __lowercase ( _UpperCamelCase, _UpperCamelCase=None ) ->Dict:
"""simple docstring"""
lowercase : Optional[int] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase : Union[str, Any] = [x for x in logs if x[2] is not None]
lowercase : Union[str, Any] = {x[2] for x in logs}
lowercase : Optional[Any] = {}
for test in tests:
lowercase : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase : Union[str, Any] = counter.most_common()
lowercase : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase : str = sum(error_counts.values() )
if n_errors > 0:
lowercase : str = {'''count''': n_errors, '''errors''': error_counts}
    lowercase : Optional[int] = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True ) )
return r
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : List[str] = '''| no. | error | status |'''
lowercase : Optional[int] = '''|-:|:-|:-|'''
lowercase : Optional[int] = [header, sep]
for error in reduced_by_error:
lowercase : Optional[Any] = reduced_by_error[error]['''count''']
lowercase : List[Any] = f"""| {count} | {error[:100]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : Dict = '''| model | no. of errors | major error | count |'''
lowercase : Union[str, Any] = '''|-:|-:|-:|-:|'''
lowercase : Dict = [header, sep]
for model in reduced_by_model:
lowercase : int = reduced_by_model[model]['''count''']
lowercase , lowercase : Dict = list(reduced_by_model[model]['''errors'''].items() )[0]
lowercase : Tuple = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
__a = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__a = get_job_links(args.workflow_run_id, token=args.token)
__a = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__a = k.find(''' / ''')
__a = k[index + len(''' / ''') :]
__a = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__a = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__a = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__a = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__a = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__a = reduce_by_error(errors)
__a = reduce_by_model(errors)
__a = make_github_table(reduced_by_error)
__a = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 337 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 346 | 0 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
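# To run just this test module (illustrative; the path assumes the usual transformers
# repository layout rather than anything stated in this file):
#
#   python -m pytest tests/models/umt5/test_modeling_umt5.py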
| 214 |
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit, computed with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
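# Sanity check for the sieve above: solution(8) == 21, matching the Project Euler 72
# example of 21 reduced proper fractions with denominator d <= 8.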
| 346 | 0 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
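# Illustrative invocation of the conversion script above (the script file name and all
# paths are placeholders, not values taken from this file):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch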
| 9 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 346 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase (lowerCamelCase_ ):
"""simple docstring"""
_UpperCAmelCase :List[str] = ["""image_processor""", """tokenizer"""]
_UpperCAmelCase :Optional[Any] = """LayoutLMv2ImageProcessor"""
_UpperCAmelCase :List[Any] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _UpperCAmelCase , )
lowercase__: str = kwargs.pop('''feature_extractor''' )
lowercase__: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase__: List[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase__: Any = features['''words''']
lowercase__: List[str] = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
lowercase__: Any = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase__: List[Any] = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase__: Dict = images
return encoded_inputs
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def _snake_case ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _snake_case ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _UpperCAmelCase , )
return self.image_processor_class
@property
def _snake_case ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _UpperCAmelCase , )
return self.image_processor
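# Illustrative usage of the processor (the checkpoint name and `image` variable are
# examples, not values taken from this file):
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # words/boxes come from the image processor's OCR
#   # encoding contains input_ids, bbox, attention_mask and image, per model_input_names above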
| 177 |
from math import factorial


def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n)."""
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    n = 2 * n
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
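# Sanity check: solution(20) == 137846528820, the well-known count of lattice routes
# through a 20x20 grid (Project Euler problem 15).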
| 346 | 0 |
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 198 |
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoded_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 346 | 0 |
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
| 189 |
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Run a few end-to-end checks that the subclass implements the interface consistently."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
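# Illustrative behaviour (not part of the original module): PhrasalConstraint([5, 6])
# forces the subsequence "5 6" to appear; update(5) returns (True, False, False) and
# update(6) returns (True, True, False), at which point remaining() == 0.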
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ):
"""simple docstring"""
UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] )
UpperCAmelCase__ = {}
for token_ids in nested_token_ids:
UpperCAmelCase__ = root
for tidx, token_id in enumerate(_UpperCAmelCase ):
if token_id not in level:
UpperCAmelCase__ = {}
UpperCAmelCase__ = level[token_id]
if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f''' {nested_token_ids}.''' )
UpperCAmelCase__ = root
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = self.trie
for current_token in current_seq:
UpperCAmelCase__ = start[current_token]
UpperCAmelCase__ = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase )
return len(_UpperCAmelCase ) == 0
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = list(root.values() )
if len(_UpperCAmelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] )

    def has_subsets(self, trie, nested_token_ids):
        """True if the leaf count differs from the word count, i.e. some word is a prefix of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
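

# Sketch of the trie layout (hypothetical token ids): DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
# stores {1: {2: {3: {}, 4: {}}}}, so next_tokens([1, 2]) -> [3, 4] and
# reached_leaf([1, 2, 3]) -> True. Two words map to two leaves, so has_subsets(...) is False.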


class DisjunctiveConstraint(Constraint):
    """A special `Constraint` that is fulfilled by generating just one of several token-id sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of tokens that would advance any remaining word, or None if there are none."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
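

# Usage sketch (made-up token ids): a constraint satisfied by either [1, 2, 3] or [1, 2, 4].
#
#     constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     constraint.advance()   # -> [1]
#     constraint.update(1)   # -> (True, False, False)
#     constraint.update(2)   # -> (True, False, False); advance() now returns [3, 4]
#     constraint.update(4)   # -> (True, True, False): one of the two words is complete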


class ConstraintListState:
    """Tracks a beam's progress through a whole list of `Constraint` objects."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max(c.seqlen for c in constraints)
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add
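
    # Worked example: with max_seqlen == 3, one completed constraint and an in-progress one
    # with remaining() == 1, the bank is 1 * 3 + (3 - 1) = 5. Higher banks mean more
    # constraint progress, which constrained beam search uses to group candidate beams.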

    def advance(self):
        """Collect every token that would advance the in-progress constraint, or any pending one."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Re-run the constraint state from scratch over the tokens generated so far."""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        """Feed one generated token into the state; returns the `(complete, stepped)` flags."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped
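
    # Return-value sketch: `(complete, stepped)` is (False, True) for a plain step,
    # (True, True) when this token finished a constraint, and (False, False) when the
    # token advanced nothing (any in-progress constraint has been reset to pending).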

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never touch the self.constraints objects
        # throughout this process, so the copy starts from the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
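

# End-to-end sketch (hypothetical token ids) of how the pieces fit together:
#
#     state = ConstraintListState([PhrasalConstraint([5, 9]), DisjunctiveConstraint([[1, 2], [1, 3]])])
#     state.advance()   # -> [5, 1]: the first token of every pending constraint
#     state.add(1)      # steps the disjunctive constraint; it becomes the in-progress one
#     state.advance()   # -> [2, 3]: only the in-progress constraint is advanced now
#     state.add(2)      # completes it; the phrasal constraint is still pending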