from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return a list where index i holds the number of ways the given dice can total i."""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
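
# Illustrative sanity check (an added sketch, not part of the original solution):
# estimate the same probability by simulation. The trial count below is an
# arbitrary assumption; the estimate only converges on the exact answer of
# 0.5731441 statistically.
import random


def simulate_peter_win_rate(trials: int = 100_000) -> float:
    """Estimate P(Peter beats Colin) with random dice rolls."""
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))  # nine four-sided dice
        colin = sum(random.randint(1, 6) for _ in range(6))  # six six-sided dice
        wins += peter > colin
    return wins / trials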
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
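# Illustrative usage of the config above (an added sketch, not part of the
# original module; because of the relative imports, access the class as
# `from transformers import AutoformerConfig` rather than running this file):
#
#     config = AutoformerConfig(prediction_length=24)
#     config.context_length  # 24 -- falls back to prediction_length
#     config.feature_size    # 9  -- 1 * len([1..7]) lags + 2 loc/scale features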
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)

CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

README_CORRECT_FOUR_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = "\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = "\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = "\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
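
# Illustrative usage of the validator exercised above (an added sketch, not
# part of the original test file): parse a known-good card against
# example_yaml_structure and validate it.
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # raises ValueError when the expected structure is violated
    print(readme.to_dict()["name"])  # "root"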
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original FLAVA weights into the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
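# Illustrative behaviour of the derived properties above (an added sketch, not
# part of the original module; import FalconConfig from an installed
# transformers rather than running this file directly):
#
#     config = FalconConfig()
#     config.head_dim  # 4544 // 71 == 64
#     config.rotary    # True -- rotary embeddings are used whenever alibi is off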
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
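# Illustrative behaviour of attribute_map above (an added sketch, not part of
# the original module): generic config names resolve to Whisper-specific ones.
#
#     config = WhisperConfig()
#     config.hidden_size          # 256, resolved to d_model
#     config.num_attention_heads  # 4, resolved to encoder_attention_heads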
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
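# A minimal standalone version of the denoising loop the tests above exercise
# (an added sketch, not part of the original test file; the random tensor below
# stands in for a real UNet's noise prediction):
#
#     import torch
#     from diffusers import KDPM2DiscreteScheduler
#
#     scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(scheduler.scale_model_input(sample, t))
#         sample = scheduler.step(model_output, t, sample).prev_sample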
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
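# Illustrative behaviour of the sentinel-token helpers above (an added sketch,
# not part of the original module; downloads the t5-small tokenizer on first use):
#
#     from transformers import T5TokenizerFast
#
#     tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#     len(tokenizer.get_sentinel_tokens())    # 100, i.e. <extra_id_0> ... <extra_id_99>
#     tokenizer.get_sentinel_token_ids()[:3]  # ids of the first three sentinels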
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store and key set; the cache holds at most n keys."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key if at capacity."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def UpperCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_lowercase ) )
_UpperCAmelCase = os.path.join(_lowercase , 'words.txt' )
_UpperCAmelCase = ''
with open(_lowercase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
for word in [sum(ord(_lowercase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_lowercase )
if __name__ == "__main__":
print(solution())
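
# Worked example of the word-value rule used above (an added sketch, not part
# of the original solution): "SKY" -> 19 + 11 + 25 = 55, the tenth triangular number.
def word_value(word: str) -> int:
    """Sum of alphabet positions: A=1, ..., Z=26."""
    return sum(ord(letter) - 64 for letter in word.upper())


if __name__ == "__main__":
    assert word_value("SKY") == 55
    assert word_value("SKY") in TRIANGULAR_NUMBERS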
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
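# How the lazy structure above behaves at runtime (an added note, not part of
# the original module): names listed in _import_structure are resolved only on
# first attribute access, so
#
#     from transformers.models.clipseg import CLIPSegConfig
#
# defers the heavy torch-backed imports until a modeling class such as
# CLIPSegModel is actually touched, while static type checkers follow the eager
# imports in the TYPE_CHECKING branch instead.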
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy (1/2 * m * v^2) of a body; mass must be non-negative."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
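
# Worked example (an added sketch, not part of the original function): a 10 kg
# body moving at 5 m/s carries 0.5 * 10 * 5 * 5 = 125 J; only the magnitude of
# the velocity matters, so the sign is irrelevant.
if __name__ == "__main__":
    assert kinetic_energy(10, 5) == 125.0
    assert kinetic_energy(10, -5) == 125.0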
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name
def convert_state_dict(orig_state_dict, config):
    # Split fused attention in_proj weights/biases into separate q/k/v projections.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def A__ ( A : Dict):
'''simple docstring'''
if num_frames == 8:
UpperCamelCase : List[Any] = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
UpperCamelCase : Optional[int] = "eating_spaghetti.npy"
elif num_frames == 32:
UpperCamelCase : Union[str, Any] = "eating_spaghetti_32_frames.npy"
UpperCamelCase : Dict = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=A , repo_type="dataset" , )
UpperCamelCase : int = np.load(A)
return list(A)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original X-CLIP checkpoint weights into the HF X-CLIP structure."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
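    # Illustrative invocation (script name and paths are examples only):
    #   python convert_xclip_checkpoint.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path /tmp/xclip --push_to_hub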
| 435 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
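# `get_signature` collapses a model's parameters into one scalar "checksum":
# if the value changes, the weights changed. The save/load tests below compare
# signatures to verify that `load_state` really restores the saved parameters.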
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """Tests that setting the torch device env var works."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )

        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with the BNB library."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing a model dispatched between CPU and GPU raises an error."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that mixing an 8-bit model with multi-GPU distribution raises an error."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device_map(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 435 | 1 |
"""simple docstring"""
def __snake_case ( _lowercase = 6008_5147_5143 ):
"""simple docstring"""
try:
UpperCamelCase = int(_lowercase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
UpperCamelCase = 1
UpperCamelCase = 2
while i * i <= n:
while n % i == 0:
UpperCamelCase = i
n //= i
i += 1
if n > 1:
UpperCamelCase = n
return int(_lowercase )
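# Illustrative check: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.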
if __name__ == "__main__":
print(f'{solution() = }') | 34 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 296 | 0
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a PoolFormer model."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
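# Minimal usage sketch (illustrative; assumes `transformers` is installed):
#   config = PoolFormerConfig(depths=[2, 2, 6, 2])
#   config.save_pretrained("my-poolformer")  # writes my-poolformer/config.json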
| 702 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981.

    Above that bound the test is only probabilistic, and `allow_probable=True`
    must be passed explicitly; a return value of False is always correct.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True
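# Quick illustration: miller_rabin(97) is True, while miller_rabin(561) is
# False -- 561 = 3 * 11 * 17 is a Carmichael number that fools the plain
# Fermat test but not Miller-Rabin.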
def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 487 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of `input_string` over a zigzag grid of `key` rows
    and read the grid row by row (rail-fence cipher)."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate a zigzag template for the key, fill it with the ciphertext,
    and read the characters back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
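# Round-trip illustration:
#   encrypt("Hello World", 4)  -> "HWe olordll"
#   decrypt("HWe olordll", 4)  -> "Hello World"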
if __name__ == "__main__":
import doctest
doctest.testmod()
| 527 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using only addition and bit shifts (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
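# Illustration: binary_multiply(3, 5) == 15 -- 5 is 0b101, so we add
# 3 (for bit 0) and 12 (for bit 2).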
def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, reducing at every step to avoid overflow."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
return res | 25 | 0 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
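# Illustration: a 50x20 pixel box at (10, 10) in a 1000x500 image maps to
# normalize_box([10, 10, 60, 30], 1000, 500) == [10, 20, 60, 60] on the
# 0-1000 scale that LayoutLM-style models expect.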
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""Constructs a LayoutLMv2 image processor (resize + optional Tesseract OCR)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
return data | 718 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image) | 665 | 0
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # The source only shows three booleans set to False; the flag names below
    # follow the usual transformers test conventions and may differ upstream.
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 9 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : int = CodeGenTokenizer
a : List[str] = CodeGenTokenizerFast
a : List[Any] = True
a : Optional[Any] = {"add_prefix_space": True}
a : Dict = False
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
UpperCamelCase__ : Dict = dict(zip(__magic_name__, range(len(__magic_name__ ) ) ) )
UpperCamelCase__ : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase__ : Dict = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ : str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCamelCase__ ( self, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **__magic_name__ )
def UpperCamelCase__ ( self, **__magic_name__ ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = '''lower newer'''
UpperCamelCase__ : int = '''lower newer'''
return input_text, output_text
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Any = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ : List[Any] = '''lower newer'''
UpperCamelCase__ : Dict = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase__ : int = tokenizer.tokenize(__magic_name__, add_prefix_space=__magic_name__ )
self.assertListEqual(__magic_name__, __magic_name__ )
UpperCamelCase__ : Dict = tokens + [tokenizer.unk_token]
UpperCamelCase__ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ), __magic_name__ )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
UpperCamelCase__ : List[Any] = '''lower newer'''
# Testing tokenization
UpperCamelCase__ : Tuple = tokenizer.tokenize(__magic_name__, add_prefix_space=__magic_name__ )
UpperCamelCase__ : Tuple = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__, __magic_name__ )
# Testing conversion to ids without special tokens
UpperCamelCase__ : int = tokenizer.encode(__magic_name__, add_special_tokens=__magic_name__, add_prefix_space=__magic_name__ )
UpperCamelCase__ : str = rust_tokenizer.encode(__magic_name__, add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__, __magic_name__ )
# Testing conversion to ids with special tokens
UpperCamelCase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
UpperCamelCase__ : Dict = tokenizer.encode(__magic_name__, add_prefix_space=__magic_name__ )
UpperCamelCase__ : List[str] = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__, __magic_name__ )
# Testing the unknown token
UpperCamelCase__ : Optional[int] = tokens + [rust_tokenizer.unk_token]
UpperCamelCase__ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__magic_name__ ), __magic_name__ )
def UpperCamelCase__ ( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase__ ( self, __magic_name__=15 ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__magic_name__, **__magic_name__ )
# Simple input
UpperCamelCase__ : int = '''This is a simple input'''
UpperCamelCase__ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCamelCase__ : Optional[int] = ('''This is a simple input''', '''This is a pair''')
UpperCamelCase__ : List[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__magic_name__, tokenizer_r.encode, __magic_name__, max_length=__magic_name__, padding='''max_length''' )
# Simple input
self.assertRaises(__magic_name__, tokenizer_r.encode_plus, __magic_name__, max_length=__magic_name__, padding='''max_length''' )
# Simple input
self.assertRaises(
__magic_name__, tokenizer_r.batch_encode_plus, __magic_name__, max_length=__magic_name__, padding='''max_length''', )
# Pair input
self.assertRaises(__magic_name__, tokenizer_r.encode, __magic_name__, max_length=__magic_name__, padding='''max_length''' )
# Pair input
self.assertRaises(__magic_name__, tokenizer_r.encode_plus, __magic_name__, max_length=__magic_name__, padding='''max_length''' )
# Pair input
self.assertRaises(
__magic_name__, tokenizer_r.batch_encode_plus, __magic_name__, max_length=__magic_name__, padding='''max_length''', )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='''<pad>''' )
# Simple input
UpperCamelCase__ : Union[str, Any] = '''This is a simple input'''
UpperCamelCase__ : List[Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
UpperCamelCase__ : Any = ('''This is a simple input''', '''This is a pair''')
UpperCamelCase__ : str = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
UpperCamelCase__ : str = tokenizer.pad_token_id
UpperCamelCase__ : int = tokenizer(__magic_name__, padding='''max_length''', max_length=30, return_tensors='''np''' )
UpperCamelCase__ : Union[str, Any] = tokenizer(__magic_name__, padding=__magic_name__, truncate=__magic_name__, return_tensors='''np''' )
UpperCamelCase__ : str = tokenizer(*__magic_name__, padding='''max_length''', max_length=60, return_tensors='''np''' )
UpperCamelCase__ : str = tokenizer(__magic_name__, padding=__magic_name__, truncate=__magic_name__, return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        # Overridden from the common tests; not applicable to this tokenizer.
        pass
| 253 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( _SCREAMING_SNAKE_CASE : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError('List is empty' )
return sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
    def get_dummy_components(self):
torch.manual_seed(0 )
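        # Tiny Transformer2DModel: just big enough to exercise the pipeline quickly on CPU.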
        transformer = Transformer2DModel(sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 237 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
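# The `parquet_path`, `dataset`, and `shared_datadir` arguments below are pytest
# fixtures (presumably provided by the test suite's conftest), not plain parameters.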
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 495 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
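# `_import_structure` maps each submodule to its public names; `_LazyModule` then
# defers the heavy imports until an attribute is actually accessed.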
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 495 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
a_ , a_ , a_ = False, False, False
@dataclass
class Audio:
    """Audio `Feature` to extract audio data from an audio file.

    Stored internally as a dict with "bytes" (the raw file content) and "path" keys.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
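    # encode_example stores audio as a {"bytes", "path"} struct for Arrow;
    # decode_example turns it back into {"path", "array", "sampling_rate"}.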
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # If you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """Decode an example audio file into audio data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed audio files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 700 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 621 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
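# Full determinism keeps the hard-coded audio slices asserted below reproducible.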
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)
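        # The vocoder turns the mel spectrogram produced by the UNet/VAE into a waveform.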
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array([-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array([-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array([-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 302 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ : str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a_ : Dict = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase__ )
self.assertListEqual(encoding.boxes , lowerCamelCase__ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 460 | 0 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved by removing
    one digit from the given number.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
    __import__("doctest").testmod()
| 595 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_1}
] , )
        # fmt: on

    @require_torch
    @slow
    def test_threshold ( self ):
        '''simple docstring'''
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation" , model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_56 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
] , )
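    # Note (added; not part of the original test file): both tests above are
    # decorated with @slow, so they only run when the RUN_SLOW=1 environment
    # variable is set, and they download facebook/sam-vit-huge on first use.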
| 595 | 1 |
class Node:
    def __init__( self , val : int ):
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val


def inorder( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )


def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res


if __name__ == "__main__":
    print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
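    # Illustrative sanity checks (added; not part of the original file). Note:
    # Node.insert() begins with `if self.val:`, so a stored value of 0 is treated
    # as an empty node; inputs containing 0 are avoided here for that reason.
    assert tree_sort([3, 1, 2]) == [1, 2, 3]
    assert tree_sort([]) == []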
| 364 |
from __future__ import annotations
def make_matrix( row_size : int = 4 ):
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]


def rotate_90( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))


def rotate_180( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))


def transpose( matrix : list[list[int]] ):
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix


def reverse_row( matrix : list[list[int]] ):
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column( matrix : list[list[int]] ):
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix( matrix : list[list[int]] ):
    """simple docstring"""
    for i in matrix:
        print(*i )


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
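    # Illustrative property check (added; not part of the original file):
    # four successive 90-degree rotations reproduce the original matrix.
    assert rotate_90(rotate_90(rotate_90(rotate_90(make_matrix())))) == make_matrix()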
| 364 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig( PretrainedConfig ):
    model_type = "open-llama"

    def __init__( self , vocab_size=10_0000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
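

# Illustrative usage sketch (added; not part of the original module):
if __name__ == "__main__":
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.model_type)  # -> open-llama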
| 405 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
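# Illustrative invocation (added; the flags mirror the `args.*` attributes used
# above, but the script file name is an assumption):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model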
| 206 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    '''simple docstring'''

    def __init__( self ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution( self ):
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )

        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
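    # Illustrative run (added; not part of the original file): prints the
    # circular convolution of the two default signals set in __init__.
    print(CircularConvolution().circular_convolution())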
| 712 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs)
class CodeGenOnnxConfig( OnnxConfigWithPast ):
    '''simple docstring'''

    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past)
        if not getattr(self._config , '''pad_token_id''' , None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''')
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}

        return common_inputs

    @property
    def num_layers( self ):
        return self._config.n_layer

    @property
    def num_attention_heads( self ):
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch

                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset( self ):
        return 13
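

# Illustrative usage sketch (added; not part of the original module):
if __name__ == "__main__":
    onnx_config = CodeGenOnnxConfig(CodeGenConfig())
    print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask']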
| 413 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar('''KT''')
VT = TypeVar('''VT''')
class Node( Generic[KT, VT] ):
    '''simple docstring'''

    def __init__( self , key : KT | str = "root" , value : VT | None = None ):
        self.key = key
        self.value = value
        self.forward : list[Node[KT, VT]] = []

    def __repr__( self ) -> str:
        return F'Node({self.key}: {self.value})'

    @property
    def level( self ) -> int:
        return len(self.forward )
class SkipList( Generic[KT, VT] ):
    '''simple docstring'''

    def __init__( self , p : float = 0.5 , max_level : int = 16 ):
        self.head : Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__( self ) -> str:
        items = list(self )
        if len(items ) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , '-' ) + '* ' * len(forwards ) )
        lines.append(' ' * label_size + '| ' * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , '-' )
                + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
            lines.append(' ' * label_size + '| ' * len(forwards ) )
            forwards = node.forward
        lines.append('None'.ljust(label_size ) + '* ' * len(forwards ) )
        return F'SkipList(level={self.level})\n' + "\n".join(lines )

    def __iter__( self ):
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level( self ) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node( self , key ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete( self , key : KT ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert( self , key : KT , value : VT ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node

    def find( self , key ) -> VT | None:
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None
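

def demo_usage():
    # Illustrative usage sketch (added; not part of the original file).
    sl = SkipList()
    sl.insert("a" , 1 )
    assert sl.find("a" ) == 1
    sl.delete("a" )
    assert sl.find("a" ) is None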
def test_insert():
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert('Key1' , 3 )
    skip_list.insert('Key2' , 12 )
    skip_list.insert('Key3' , 41 )
    skip_list.insert('Key4' , -19 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert('Key1' , 10 )
    skip_list.insert('Key1' , 12 )

    skip_list.insert('Key5' , 7 )
    skip_list.insert('Key7' , 10 )
    skip_list.insert('Key10' , 5 )

    skip_list.insert('Key7' , 7 )
    skip_list.insert('Key5' , 5 )
    skip_list.insert('Key10' , 10 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    """simple docstring"""
    skip_list = SkipList()
    assert skip_list.find('Some key' ) is None


def test_search():
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key2' , 20 )
    assert skip_list.find('Key2' ) == 20

    skip_list.insert('Some Key' , 10 )
    skip_list.insert('Key2' , 8 )
    skip_list.insert('V' , 13 )

    assert skip_list.find('Y' ) is None
    assert skip_list.find('Key2' ) == 8
    assert skip_list.find('Some Key' ) == 10
    assert skip_list.find('V' ) == 13


def test_deleting_item_from_empty_list_do_nothing():
    """simple docstring"""
    skip_list = SkipList()
    skip_list.delete('Some key' )

    assert len(skip_list.head.forward ) == 0


def test_deleted_items_are_not_founded_by_find_method():
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    skip_list.delete('Key2' )

    assert skip_list.find('V' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_removes_only_given_key():
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) == 14
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('X' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key1' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key2' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_doesnt_leave_dead_nodes():
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 142 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('X' )

    def traverse_keys( node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4


def test_iter_always_yields_sorted_values():
    """simple docstring"""
    def is_sorted( lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )


def pytests():
    """simple docstring"""
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert(2 , '2' )
    skip_list.insert(4 , '4' )
    skip_list.insert(6 , '4' )
    skip_list.insert(4 , '5' )
    skip_list.insert(8 , '4' )
    skip_list.insert(9 , '4' )

    skip_list.delete(4 )

    print(skip_list )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main() | 561 |
'''simple docstring'''
def palindromic_string( input_string : str ):
    """simple docstring"""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''
    output_string = ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
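

# Illustrative expected outputs (added; not part of the original file):
#   palindromic_string("abbbaba")          -> "abbba"
#   palindromic_string("forgeeksskeegfor") -> "geeksskeeg"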
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 561 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    selected_warnings = set()
    buffer = []

    def parse_line( fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '''\n'''.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )

    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )

    return selected_warnings
if __name__ == "__main__":

    def list_str( values ):
        return values.split(''',''' )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    # optional parameters
    parser.add_argument(
        '--targets',
        default='DeprecationWarning,UserWarning,FutureWarning',
        type=list_str,
        help='Comma-separated list of target warning(s) which we want to extract.',
    )
    parser.add_argument(
        '--from_gh',
        action='store_true',
        help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('=' * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 346 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''


def get_all_tweets( screen_name ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )

    # save most recent tweets
    alltweets.extend(new_tweets )

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f"getting tweets before {oldest}" )

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )

        # save most recent tweets
        alltweets.extend(new_tweets )

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets )} tweets downloaded so far" )

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets('FirePing32')
| 346 | 1 |
def logical_left_shift( number : int , shift_amount : int ):
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )

    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift( number : int , shift_amount : int ):
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )

    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift( number : int , shift_amount : int ):
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )

    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
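
    # Illustrative spot checks (added; not part of the original file):
    print(logical_left_shift(0b1101 , 2 ))       # 0b110100
    print(logical_right_shift(0b1101 , 2 ))      # 0b11
    print(arithmetic_right_shift(-0b1101 , 2 ))  # sign bit replicated on the left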
| 21 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected" )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    '''simple docstring'''
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']

    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']

    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    '''simple docstring'''
    weight_q , weight_k , weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )

    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']

    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )

    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )

    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path , unet_config ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1

        if i != len(down_block_types ) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1

            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1

            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(F"""Checkpoint: {ckpt_name}""")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 330 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5e-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=10 )
    parser.add_argument('--weight_decay' , type=float , default=0.0_1 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()


metric = load('''accuracy''')


def compute_metrics( eval_pred ):
    """simple docstring"""
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )


class CustomCallback( TrainerCallback ):
    """simple docstring"""

    def __init__( self , trainer ):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
            return control_copy


def main():
    """simple docstring"""
    args = get_args()
    set_seed(args.seed )

    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )

    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )

    def tokenize( example ):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1024 )
        label = labels.str2int(example['complexity'] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )

    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )

    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()


if __name__ == "__main__":
    main()
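    # Illustrative invocation (added; the flags mirror get_args() above, the
    # script file name is an assumption):
    #   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5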
| 711 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('GET' , 'https://huggingface.co' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('GET' , 'https://huggingface.co' )


def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 434 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)

OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
    parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
    parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
    parser.add_argument(
        """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
    parser.add_argument(
        """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
    parser.add_argument(
        """--na-prob-thresh""" , """-t""" , type=float , default=1.0 , help="""Predict unanswerable if no-answer probability exceeds this (default = 1.0).""" , )
    parser.add_argument(
        """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=None , help="""Save precision-recall curves to directory.""" )
    parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans( dataset ):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["""answers"""]["""text"""] )
    return qid_to_has_ans


def normalize_answer( s ):
    '''simple docstring'''
    def remove_articles( text ):
        return ARTICLES_REGEX.sub(""" """ , text )

    def white_space_fix( text ):
        return " ".join(text.split() )

    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower( text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


def get_tokens( s ):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()


def compute_exact( a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )


def compute_fa( a_gold , a_pred ):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores( dataset , preds ):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["""id"""]
                gold_answers = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""""""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
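

# Illustrative values (added; not part of the original script):
#   compute_exact("a cat", "the cat")  -> 1        (articles are stripped)
#   compute_fa("a cat sat", "the cat") -> 0.666...  (precision 1.0, recall 0.5)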
def apply_no_ans_threshold( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict( exact_scores , fa_scores , qid_list=None ):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total),
                ("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total),
                ("""total""", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("""total""", total),
            ] )


def merge_eval( main_eval , new_eval , prefix ):
    '''simple docstring'''
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def UpperCamelCase ( snake_case__ : str ,snake_case__ : Any ,snake_case__ : Any ,snake_case__ : List[Any] ):
'''simple docstring'''
plt.step(snake_case__ ,snake_case__ ,color="""b""" ,alpha=0.2 ,where="""post""" )
plt.fill_between(snake_case__ ,snake_case__ ,step="""post""" ,alpha=0.2 ,color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(snake_case__ )
plt.savefig(snake_case__ )
plt.clf()
def UpperCamelCase ( snake_case__ : int ,snake_case__ : int ,snake_case__ : int ,snake_case__ : Tuple ,snake_case__ : Union[str, Any]=None ,snake_case__ : Any=None ):
'''simple docstring'''
__snake_case :str = sorted(snake_case__ ,key=lambda snake_case__ : na_probs[k] )
__snake_case :Tuple = 0.0
__snake_case :List[Any] = 1.0
__snake_case :Optional[Any] = 0.0
__snake_case :Any = [1.0]
__snake_case :Union[str, Any] = [0.0]
__snake_case :List[Any] = 0.0
for i, qid in enumerate(snake_case__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__snake_case :Any = true_pos / float(i + 1 )
__snake_case :Dict = true_pos / float(snake_case__ )
if i == len(snake_case__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__ )
recalls.append(snake_case__ )
if out_image:
plot_pr_curve(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
return {"ap": 1_0_0.0 * avg_prec}
def UpperCamelCase ( snake_case__ : List[Any] ,snake_case__ : Tuple ,snake_case__ : int ,snake_case__ : List[str] ,snake_case__ : str ,snake_case__ : Optional[int] ):
'''simple docstring'''
if out_image_dir and not os.path.exists(snake_case__ ):
os.makedirs(snake_case__ )
__snake_case :Union[str, Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__snake_case :Tuple = make_precision_recall_eval(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,out_image=os.path.join(snake_case__ ,"""pr_exact.png""" ) ,title="""Precision-Recall curve for Exact Match score""" ,)
__snake_case :Any = make_precision_recall_eval(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,out_image=os.path.join(snake_case__ ,"""pr_f1.png""" ) ,title="""Precision-Recall curve for F1 score""" ,)
__snake_case :Tuple = {k: float(snake_case__ ) for k, v in qid_to_has_ans.items()}
__snake_case :int = make_precision_recall_eval(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,out_image=os.path.join(snake_case__ ,"""pr_oracle.png""" ) ,title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" ,)
merge_eval(snake_case__ ,snake_case__ ,"""pr_exact""" )
merge_eval(snake_case__ ,snake_case__ ,"""pr_f1""" )
merge_eval(snake_case__ ,snake_case__ ,"""pr_oracle""" )
def UpperCamelCase ( snake_case__ : str ,snake_case__ : Any ,snake_case__ : List[str] ,snake_case__ : Optional[Any] ):
'''simple docstring'''
if not qid_list:
return
__snake_case :int = [na_probs[k] for k in qid_list]
__snake_case :List[Any] = np.ones_like(snake_case__ ) / float(len(snake_case__ ) )
plt.hist(snake_case__ ,weights=snake_case__ ,bins=20 ,range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(snake_case__ ,f'''na_prob_hist_{name}.png''' ) )
plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
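

# Illustrative sketch (not part of the original script): with two questions where
# only q1 has a gold answer, find_best_thresh walks qids by ascending no-answer
# probability and keeps the cutoff that maximizes the score. The dicts below are
# hypothetical toy values:
#
#   best, thresh = find_best_thresh(
#       preds={"q1": "some answer", "q2": ""},
#       scores={"q1": 1.0, "q2": 0.0},
#       na_probs={"q1": 0.1, "q2": 0.9},
#       qid_to_has_ans={"q1": True, "q2": False},
#   )
#   # best == 100.0 and thresh == 0.1: both questions end up handled correctly.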


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 455 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
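

# Usage sketch (illustrative, not part of this module): this formatter is what backs
# `Dataset.set_format("torch")`, so integer columns come back as torch.int64 and float
# columns as torch.float32 per the defaults in `_tensorize` above. The dataset choice
# below is hypothetical:
#
#   ds = load_dataset("rotten_tomatoes", split="train")
#   ds.set_format("torch", columns=["label"])
#   ds[0]["label"].dtype  # -> torch.int64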
| 455 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
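

# Export sketch (illustrative): wiring the two classes together for an ONNX export with
# cached key/values. `tokenizer` is assumed to be a matching PreTrainedTokenizer; the
# calls follow the OnnxConfigWithPast API used above.
#
#   config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   dummy_inputs = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )  # dict with input_ids, past_key_values and an extended attention_mask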
| 137 |
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 137 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
| 672 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
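

# Usage sketch (illustrative): the tool follows the PipelineTool call protocol, so
# encode/forward/decode run automatically when the tool is called. The image path
# below is hypothetical.
#
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   caption = captioner(Image.open("photo.jpg"))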
| 708 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS

logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
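

# Usage sketch (illustrative): the class is a drop-in `Trainer` subclass; passing
# `data_args` enables the sortish sampler and the generation lengths used above.
# All constructor values shown are hypothetical placeholders.
#
#   trainer = Seq2SeqTrainer(
#       config=model.config, data_args=data_args,
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#   )
#   trainer.train()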
| 54 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, skipping whitespace/control
    characters that BPE merge rules would otherwise break on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
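

# Example (sketch): the mapping is a bijection over all 256 byte values; e.g. the space
# byte maps to "Ġ" (U+0120), which is how GPT-2-style BPE encodes a leading space as
# part of the following token:
#
#   bytes_to_unicode()[ord(" ")]  # -> "Ġ"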


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 87 |
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
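

# Sanity check (sketch): solution(10) == 17, i.e. 2 + 3 + 5 + 7; the default
# n = 2_000_000 is the Project Euler problem 10 input.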


if __name__ == "__main__":
    print(f"{solution() = }")
| 287 | 0 |
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with latent diffusion: a VQ-VAE decoder,
    a UNet denoiser and a DDIM scheduler.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
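

# Usage sketch (illustrative): driving the pipeline like any DiffusionPipeline. The
# checkpoint name is an assumption, chosen as a known unconditional latent diffusion model.
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]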
| 718 |
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 319 | 0 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
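

# Sanity check (sketch): solution() evaluates to 0.5731441, the published Project Euler
# 205 answer (the probability that Peter's nine 4-sided dice beat Colin's six 6-sided dice).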
if __name__ == "__main__":
    print(f"{solution() = }")
| 234 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
    - **zero_division** (`string`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 234 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int , ) ->str:
'''simple docstring'''
a : Optional[Any] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
a, a : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
a : List[str] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowercase )
assert base_extractor.is_extractable(_lowercase )
a : Dict = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(_lowercase , _lowercase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
a : List[Any] = file_path.read_text(encoding="utf-8" )
else:
a : Optional[Any] = output_path.read_text(encoding="utf-8" )
a : Optional[int] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, text_file, xz_file, zip_file, zstd_file, tmp_path
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # this PNG payload happens to contain a ZIP end-of-central-directory marker,
    # so `zipfile.is_zipfile` reports a false positive
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
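# A minimal sketch of using the extractors directly, outside of the tests
# (illustrative; the archive path is hypothetical):
#
#     from datasets.utils.extract import Extractor
#
#     archive_path = "path/to/archive.tar.gz"
#     extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
#     if extractor_format is not None:
#         Extractor.extract(archive_path, "extracted_dir", extractor_format)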
| 31 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
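# A minimal sketch of what `tokenize` computes for one example (illustrative;
# assumes a tokenizer has been loaded as above):
#
#     example = {"content": "def add(a, b):\n    return a + b\n"}
#     out = tokenize(example)
#     # out["input_ids"]: token ids for the file contents
#     # out["ratio_char_token"]: characters per token, a rough tokenizer-efficiency measure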
| 31 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
        do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None,
        do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
_A = PoolFormerImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Dict ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'crop_pct' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_std' ) )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30} )
_A = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
pass
def __UpperCAmelCase ( self : int ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
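# A minimal usage sketch of the image processor exercised above (illustrative;
# "sail/poolformer_s12" is assumed to be an available Hub checkpoint):
#
#     from PIL import Image
#     from transformers import PoolFormerImageProcessor
#
#     processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#     image = Image.new("RGB", (256, 256))
#     inputs = processor(image, return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])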
| 107 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCamelCase :
    def __init__(
        self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50",
        batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
def __snake_case ( self :List[Any] ) ->Optional[Any]:
lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Optional[int] = self.get_config()
return config, pixel_values
def __snake_case ( self :Tuple ) ->int:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __snake_case ( self :Any , __magic_name__ :List[str] , __magic_name__ :Optional[int] ) ->Tuple:
lowercase : List[str] = TimmBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowercase : str = model(__magic_name__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __snake_case ( self :str ) ->Dict:
lowercase : Tuple = self.prepare_config_and_inputs()
lowercase , lowercase : List[Any] = config_and_inputs
lowercase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCamelCase (__snake_case , __snake_case , __snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = (TimmBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def __snake_case ( self :Dict ) ->List[str]:
lowercase : List[str] = TimmBackboneModelTester(self )
lowercase : Any = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self :Union[str, Any] ) ->Optional[Any]:
lowercase : Dict = """resnet18"""
lowercase : int = """microsoft/resnet-18"""
lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
lowercase : int = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowercase : Tuple = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
lowercase : Any = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __snake_case ( self :Tuple ) ->Optional[int]:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __snake_case ( self :Optional[Any] ) ->int:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->Tuple:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __snake_case ( self :List[Any] ) ->List[str]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Any:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :int ) ->int:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :str ) ->Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Optional[int]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :List[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __snake_case ( self :Union[str, Any] ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __snake_case ( self :Tuple ) ->List[Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __snake_case ( self :List[Any] ) ->int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case ( self :Optional[Any] ) ->Optional[int]:
pass
def __snake_case ( self :Union[str, Any] ) ->Union[str, Any]:
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str = model_class(__magic_name__ )
lowercase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Any = [*signature.parameters.keys()]
lowercase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __snake_case ( self :Any ) ->List[str]:
lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Any = True
lowercase : int = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowercase : Union[str, Any] = self.all_model_classes[0]
lowercase : Tuple = model_class(__magic_name__ )
model.to(__magic_name__ )
lowercase : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowercase : Dict = model(**__magic_name__ )
lowercase : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
lowercase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowercase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__magic_name__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __snake_case ( self :Any ) ->List[Any]:
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Any = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowercase : List[Any] = copy.deepcopy(__magic_name__ )
lowercase : Dict = None
lowercase : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Optional[Any] = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowercase : str = copy.deepcopy(__magic_name__ )
lowercase : int = False
lowercase : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Dict = model(**__magic_name__ )
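# A minimal sketch of the API these tests exercise (illustrative; requires `timm`
# and downloads "resnet18" weights on first use):
#
#     import torch
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#     pixel_values = torch.randn(1, 3, 224, 224)
#     outputs = backbone(pixel_values)
#     print([fm.shape for fm in outputs.feature_maps])  # one feature map per out index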
| 264 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
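# A quick illustration of the subsampling above (illustrative values):
#
#     import numpy as np
#
#     wav = np.zeros(16_000 * 30)      # 30s of audio at 16kHz
#     clip = random_subsample(wav, max_length=20.0, sample_rate=16_000)
#     assert len(clip) == 16_000 * 20  # a randomly placed 20s window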
@dataclass
class a__:
a_ : Optional[str] = field(default=snake_case__ , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
a_ : str = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
a_ : str = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
a_ : str = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
a_ : str = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
a_ : Optional[int] = field(
default=snake_case__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
a_ : Optional[int] = field(
default=snake_case__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
a_ : float = field(
default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class a__:
a_ : str = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
a_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
a_ : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
a_ : bool = field(
default=snake_case__ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
a_ : bool = field(
default=snake_case__ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
a_ : bool = field(
default=snake_case__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
a_ : Optional[bool] = field(
default=snake_case__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
a_ : bool = field(
default=snake_case__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _lowercase ( self ) -> Optional[Any]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , _UpperCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case__ =training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
snake_case__ =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
snake_case__ =DatasetDict()
snake_case__ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
snake_case__ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
snake_case__ =AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
snake_case__ =raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
snake_case__ =feature_extractor.model_input_names[0]
def train_transforms(UpperCamelCase_ : Tuple ):
snake_case__ =[]
for audio in batch[data_args.audio_column_name]:
snake_case__ =random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCamelCase_ )
snake_case__ =feature_extractor(UpperCamelCase_ , sampling_rate=feature_extractor.sampling_rate )
snake_case__ ={model_input_name: inputs.get(UpperCamelCase_ )}
snake_case__ =list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCamelCase_ : Tuple ):
snake_case__ =[audio['array'] for audio in batch[data_args.audio_column_name]]
snake_case__ =feature_extractor(UpperCamelCase_ , sampling_rate=feature_extractor.sampling_rate )
snake_case__ ={model_input_name: inputs.get(UpperCamelCase_ )}
snake_case__ =list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case__ =raw_datasets['train'].features[data_args.label_column_name].names
snake_case__ , snake_case__ ={}, {}
for i, label in enumerate(UpperCamelCase_ ):
snake_case__ =str(UpperCamelCase_ )
snake_case__ =label
# Load the accuracy metric from the datasets package
snake_case__ =evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase_ : Tuple ):
snake_case__ =np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCamelCase_ , references=eval_pred.label_ids )
snake_case__ =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCamelCase_ ) , labelaid=UpperCamelCase_ , idalabel=UpperCamelCase_ , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case__ =AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case__ =(
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCamelCase_ , output_all_columns=UpperCamelCase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case__ =(
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCamelCase_ , output_all_columns=UpperCamelCase_ )
# Initialize our trainer
snake_case__ =Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# Training
if training_args.do_train:
snake_case__ =None
if training_args.resume_from_checkpoint is not None:
snake_case__ =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ =last_checkpoint
snake_case__ =trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ =trainer.evaluate()
trainer.log_metrics('eval' , UpperCamelCase_ )
trainer.save_metrics('eval' , UpperCamelCase_ )
# Write model card and (optionally) push to hub
snake_case__ ={
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase_ )
else:
trainer.create_model_card(**UpperCamelCase_ )
if __name__ == "__main__":
main()
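# A hedged example invocation (illustrative; the dataset/config ids below follow the
# audio-classification example docs and may need adjusting to your setup):
#
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb \
#         --dataset_config_name ks \
#         --output_dir wav2vec2-base-ft-keyword-spotting \
#         --do_train --do_eval \
#         --max_length_seconds 1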
| 581 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 581 | 1 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('''nan''')
class Tee:
    """A helper class to tee print's output into a file. Usage: `sys.stdout = Tee(filename)`."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
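# A minimal sketch of how Tee is used in main below (illustrative filename):
#
#     import sys
#
#     sys.stdout = Tee("benchmark-report.txt")
#     print("goes to the console and is appended to benchmark-report.txt")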
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def snake_case_ ( A_ : int, A_ : List[Any], A_ : str, A_ : List[Any], A_ : Optional[int], A_ : Union[str, Any], A_ : List[str] ):
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0, 1_00 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )}, )
_lowerCamelCase : List[str] = subprocess.run(A_, capture_output=A_, text=A_ )
if verbose:
print('''STDOUT''', result.stdout )
print('''STDERR''', result.stderr )
# save the streams
_lowerCamelCase : Union[str, Any] = variation.replace(''' ''', '''-''' )
with open(Path(A_ ) / F'''log.{prefix}.stdout.txt''', '''w''' ) as f:
f.write(result.stdout )
with open(Path(A_ ) / F'''log.{prefix}.stderr.txt''', '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''', '''r''', encoding='''utf-8''' ) as f:
_lowerCamelCase : str = json.load(A_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def snake_case_ ( A_ : Dict, A_ : List[str], A_ : Optional[int], A_ : Any, A_ : List[Any], A_ : str, A_ : Union[str, Any], A_ : List[Any], A_ : Optional[Any], A_ : Any, ):
'''simple docstring'''
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[str] = []
_lowerCamelCase : Optional[Any] = F'''{id}: {variation:<{longest_variation_len}}'''
_lowerCamelCase : int = F'''{preamble}: '''
_lowerCamelCase : List[str] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(A_ ), desc=A_, leave=A_ ):
_lowerCamelCase : Dict = process_run_single(
A_, A_, A_, A_, A_, A_, A_ )
_lowerCamelCase : Dict = single_run_metrics[target_metric_key]
if not math.isnan(A_ ):
metrics.append(A_ )
results.append(A_ )
outcome += "✓"
else:
outcome += "✘"
_lowerCamelCase : List[Any] = F'''\33[2K\r{outcome}'''
if len(A_ ) > 0:
_lowerCamelCase : str = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_lowerCamelCase : Optional[Any] = round(mean_metrics[target_metric_key], 2 )
_lowerCamelCase : List[Any] = F'''{outcome} {mean_target}'''
if len(A_ ) > 1:
results_str += F''' {tuple(round(A_, 2 ) for x in results )}'''
print(A_ )
_lowerCamelCase : Optional[Any] = variation
return mean_metrics
else:
print(A_ )
return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def snake_case_ ( A_ : Union[str, Any], A_ : str, A_ : int, A_ : int, A_ : Any ):
'''simple docstring'''
_lowerCamelCase : str = pd.DataFrame(A_ )
_lowerCamelCase : Optional[Any] = '''variation'''
_lowerCamelCase : List[Any] = '''diff_%'''
_lowerCamelCase : Optional[int] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_lowerCamelCase : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(A_ ):
# as a fallback, use the minimal value as the sentinel
_lowerCamelCase : str = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(A_ ):
_lowerCamelCase : str = df.apply(
lambda A_ : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0, axis='''columns''', )
# re-order columns
_lowerCamelCase : str = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_lowerCamelCase : Tuple = df.reindex(A_, axis='''columns''' ) # reorder cols
# capitalize
_lowerCamelCase : Any = df.rename(str.capitalize, axis='''columns''' )
# make the cols as narrow as possible
_lowerCamelCase : int = df.rename(lambda A_ : c.replace('''_''', '''<br>''' ), axis='''columns''' )
_lowerCamelCase : int = df.rename(lambda A_ : c.replace('''_''', '''\n''' ), axis='''columns''' )
_lowerCamelCase : str = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=A_, floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=A_, floatfmt='''.2f''' )]
print('''\n\n'''.join(A_ ) )
def main():
'''simple docstring'''
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''', default=A_, type=A_, required=A_, help='''Base cmd''', )
parser.add_argument(
'''--variations''', default=A_, type=A_, nargs='''+''', required=A_, help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''', )
parser.add_argument(
'''--base-variation''', default=A_, type=A_, help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''', )
parser.add_argument(
'''--target-metric-key''', default=A_, type=A_, required=A_, help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''', )
parser.add_argument(
'''--report-metric-keys''', default='''''', type=A_, help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''', )
parser.add_argument(
'''--repeat-times''', default=1, type=A_, help='''How many times to re-run each variation - an average will be reported''', )
parser.add_argument(
'''--output_dir''', default='''output_benchmark''', type=A_, help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''', )
parser.add_argument(
'''--verbose''', default=A_, action='''store_true''', help='''Whether to show the outputs of each run or just the benchmark progress''', )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
_lowerCamelCase : List[str] = args.output_dir
Path(A_ ).mkdir(exist_ok=A_ )
_lowerCamelCase : List[Any] = get_base_command(A_, A_ )
# split each dimension into its --foo variations
_lowerCamelCase : Tuple = [list(map(str.strip, re.split(R'''\|''', A_ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_lowerCamelCase : str = list(map(str.strip, map(''' '''.join, itertools.product(*A_ ) ) ) )
_lowerCamelCase : Optional[Any] = max(len(A_ ) for x in variations )
# split wanted keys
_lowerCamelCase : List[str] = args.report_metric_keys.split()
# capture prints into a log file for convenience
_lowerCamelCase : List[str] = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
_lowerCamelCase : Tuple = Tee(A_ )
print(F'''\n*** Running {len(A_ )} benchmarks:''' )
print(F'''Base command: {" ".join(A_ )}''' )
_lowerCamelCase : Tuple = '''variation'''
_lowerCamelCase : Optional[Any] = []
for id, variation in enumerate(tqdm(A_, desc='''Total completion: ''', leave=A_ ) ):
_lowerCamelCase : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1, A_, A_, A_, A_, args.target_metric_key, A_, args.repeat_times, A_, args.verbose, ) )
process_results(A_, args.target_metric_key, A_, args.base_variation, A_ )
if __name__ == "__main__":
main()
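# A quick illustration of how the --variations dimensions expand (this mirrors the
# itertools.product call in main; the flag values are illustrative):
#
#     import itertools
#
#     dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#     variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#     # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']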
| 83 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
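# A quick sketch of the parsing above on a typical pytest summary (illustrative):
#
#     failed, success, time_spent = handle_test_results("= 2 failed, 31 passed in 121.4s =")
#     # failed == 2, success == 31, time_spent == "121.4s"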
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = title
_A = doc_test_results["""time_spent"""].split(""",""" )[0]
_A = doc_test_results["""success"""]
_A = doc_test_results["""failures"""]
_A = self.n_success + self.n_failures
# Failures and success of the modeling tests
_A = doc_test_results
@property
def UpperCAmelCase ( self ) -> str:
_A = [self._time_spent]
_A = 0
for time in time_spent:
_A = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase_ ) == 1:
_A = [0, 0, time_parts[0]]
_A , _A , _A = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_A , _A , _A = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'''{int(lowerCAmelCase_ )}h{int(lowerCAmelCase_ )}m{int(lowerCAmelCase_ )}s'''
@property
def UpperCAmelCase ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCAmelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def UpperCAmelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def UpperCAmelCase ( self ) -> Dict:
_A = 40
_A = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )}
_A = """"""
for category, failures in category_failures.items():
if len(lowerCAmelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def UpperCAmelCase ( self ) -> str:
_A = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase_ )
@staticmethod
def UpperCAmelCase ( ) -> str:
_A = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase_ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase_ , )
def UpperCAmelCase ( self ) -> Tuple:
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_A = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
_A = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=lowerCAmelCase_ , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = """"""
for key, value in failures.items():
_A = value[:2_00] + """ [Truncated]""" if len(lowerCAmelCase_ ) > 2_50 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
_A = job_name
_A = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_A = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCAmelCase ( self ) -> Union[str, Any]:
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_A = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_A = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_A = F'''*Num failures* :{len(job_result['failed'] )} \n'''
_A = job_result["""failures"""]
_A = self.get_reply_blocks(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text=lowerCAmelCase_ )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=lowerCAmelCase_ , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str) -> dict:
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts() -> dict:
    class Artifact:
        def __init__(self, name: str) -> None:
            self.name = name
            self.paths = []

        def __str__(self) -> str:
            return self.name

        def add_path(self, path: str) -> None:
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 401 | 0 |
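# --- Illustrative sketch (added; not part of the original script): how a pytest
# short-summary line is mapped to a doc-test category above. The sample
# `summary_short` string is made up for demonstration.
import re
from fnmatch import fnmatch

summary_short = "FAILED docs/source/quicktour.md::quicktour.md - AssertionError"
docs = {"*.py": "API Examples", "*.md": "MD Examples"}
for line in summary_short.split("\n"):
    if re.search("FAILED", line):
        line = line.replace("FAILED ", "").split()[0]
        file_path, test = line.split("::") if "::" in line else (line, line)
        for file_regex, category in docs.items():
            if fnmatch(file_path, file_regex):
                print(category, file_path, test)  # -> MD Examples docs/source/quicktour.md quicktour.md
                break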
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version: str) -> None:
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers.") | 94 |
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682 | 94 | 1 |
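# --- Sanity check (added illustration): the same DP restricted to coins {1, 2}
# counts the 3 ways to make 4 pence (1+1+1+1, 1+1+2, 2+2).
ways = [0] * 5
ways[0] = 1
for coin in (1, 2):
    for i in range(coin, 5):
        ways[i] += ways[i - coin]
assert ways[4] == 3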
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return n with n % n1 == r1 and n % n2 == r2 (n1 and n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, built from modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 515 |
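# --- Numeric check (added illustration; assumes the functions above are in scope):
# n ≡ 1 (mod 5) and n ≡ 4 (mod 7) has the unique solution n = 11 in [0, 35).
assert chinese_remainder_theorem(5, 1, 7, 4) == 11
assert chinese_remainder_theorem2(5, 1, 7, 4) == 11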
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self) -> None:
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self) -> None:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self) -> None:
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self) -> None:
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 288 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs used to train the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected (context, IG(X)) pairs."""
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, filtering training contexts with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 705 |
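# --- Illustrative sketch (added; not from the original script): the
# information-gain filtering rule used inside `finetune`, isolated from the
# training loop. The predicted scores and the relaxation step are made up.
threshold = 1.0
for step, predicted_q in enumerate([1.4, 0.2, 0.9, 1.7]):
    if step == 2:  # stand-in for `global_step == 10`: the filter is relaxed after enough batches
        threshold = -1
    do_backprop = predicted_q >= threshold
    print(step, predicted_q, do_backprop)  # step 1 is filtered out; after relaxation everything passes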
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 408 | 0 |
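# --- Usage sketch (added illustration): build a default config and override one field.
config = MobileNetV2Config(depth_multiplier=1.4)
print(config.model_type)        # -> mobilenet_v2
print(config.depth_multiplier)  # -> 1.4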
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 24 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 417 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase_ : Tuple = """Create a default config file for Accelerate with only a few flags set."""
def _A (__a="no" , __a = default_json_config_file , __a = False ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Path(__a )
path.parent.mkdir(parents=__a , exist_ok=__a )
if path.exists():
print(
f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
SCREAMING_SNAKE_CASE_ : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
SCREAMING_SNAKE_CASE_ : str = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cuda.device_count()
SCREAMING_SNAKE_CASE_ : List[Any] = num_gpus
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
if num_gpus > 1:
SCREAMING_SNAKE_CASE_ : Tuple = '''MULTI_GPU'''
else:
SCREAMING_SNAKE_CASE_ : List[Any] = '''NO'''
elif is_xpu_available() and use_xpu:
SCREAMING_SNAKE_CASE_ : str = torch.xpu.device_count()
SCREAMING_SNAKE_CASE_ : Dict = num_xpus
SCREAMING_SNAKE_CASE_ : Any = False
if num_xpus > 1:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''MULTI_XPU'''
else:
SCREAMING_SNAKE_CASE_ : Tuple = '''NO'''
elif is_npu_available():
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.npu.device_count()
SCREAMING_SNAKE_CASE_ : List[Any] = num_npus
SCREAMING_SNAKE_CASE_ : Any = False
if num_npus > 1:
SCREAMING_SNAKE_CASE_ : List[str] = '''MULTI_NPU'''
else:
SCREAMING_SNAKE_CASE_ : List[str] = '''NO'''
else:
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''NO'''
SCREAMING_SNAKE_CASE_ : Dict = ClusterConfig(**__a )
config.to_json_file(__a )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 176 |
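# --- Usage sketch (added illustration; the output path is arbitrary):
path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")
if path:
    print(f"accelerate configuration saved at {path}")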
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count numbers below max_number with exactly two prime factors (Project Euler 187)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 176 | 1 |
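# --- Sanity check (added illustration): semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, ten of them.
assert solution(30) == 10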
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 10_24,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 105 |
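# --- Usage sketch (added illustration; requires network access to the Hub):
# tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# ids = tok("Hello world")["input_ids"]
# print(tok.decode(ids))  # expected roughly "<s>Hello world</s>"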
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
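# --- Sanity check (added illustration):
assert fizz_buzz(1, 5) == "1 2 Fizz 4 Buzz "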
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 557 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p itself should be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 557 | 1 |
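# --- Sanity check (added illustration): 2**p - 1 is prime for p in {3, 5, 7, 13}
# and composite for p = 11 (2047 = 23 * 89).
assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13))
assert not lucas_lehmer_test(11)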
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=50,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image) | 585 |
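# --- Usage sketch (added illustration; the checkpoint id is an example, and
# sampling needs a GPU plus network access to the Hub):
# pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]
# image.save("sample.png")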
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(self.languages)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        } | 241 | 0 |
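# --- Usage sketch (added illustration): encoding flattens per-language lists
# and sorts by (language, text).
feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}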
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 396 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
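    # A quick numeric sanity check (added example): the midpoint of a
    # degree-1 Bezier curve between (1, 2) and (3, 5) is the average of
    # the two control points.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)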
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 396 | 1 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad the binary string so its length is a multiple of 3.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
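    # A small usage example (added for illustration): 0b1111000 is 0o170.
    print(bin_to_octal("1111000"))  # -> "170"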
| 305 | INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 305 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 430 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase__ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> List[Any]:
_snake_case = {}
state_dict.pop('''pixel_mean''' , __lowerCamelCase )
state_dict.pop('''pixel_std''' , __lowerCamelCase )
_snake_case = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_snake_case = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
_snake_case = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(2 ) )
if layer_nb == 0:
_snake_case = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
_snake_case = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
_snake_case = key.replace('''layers.2''' , '''proj_out''' )
_snake_case = value
_snake_case = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]="ybelkada/segment-anything" ) -> List[str]:
_snake_case = hf_hub_download(__lowerCamelCase , f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_snake_case = SamConfig()
elif "sam_vit_l" in model_name:
_snake_case = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_snake_case = SamConfig(
vision_config=__lowerCamelCase , )
elif "sam_vit_h" in model_name:
_snake_case = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_snake_case = SamConfig(
vision_config=__lowerCamelCase , )
_snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
_snake_case = replace_keys(__lowerCamelCase )
_snake_case = SamImageProcessor()
_snake_case = SamProcessor(image_processor=__lowerCamelCase )
_snake_case = SamModel(__lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
_snake_case = hf_model.to('''cuda''' )
_snake_case = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' )
_snake_case = [[[4_00, 6_50]]]
_snake_case = [[1]]
_snake_case = processor(images=np.array(__lowerCamelCase ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
_snake_case = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
_snake_case = ((75, 2_75, 17_25, 8_50),)
_snake_case = processor(images=np.array(__lowerCamelCase ) , input_boxes=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
_snake_case = [[[4_00, 6_50], [8_00, 6_50]]]
_snake_case = [[1, 1]]
_snake_case = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
UpperCAmelCase__ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
UpperCAmelCase__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
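# Illustrative command line (added example). The flags are the ones defined by
# the argparse block above; the script file name and dump folder are placeholder
# assumptions, not part of the original file:
#
#   python convert_sam_to_hf.py \
#       --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base \
#       --push_to_hub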
| 430 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
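# Added note on the lazy-import pattern above: importing the package stays cheap,
# and heavy submodules load only on first attribute access (sketch; the module
# path below is assumed):
#
#   from transformers.models.rag import RagConfig  # loads configuration_rag only
#   from transformers.models.rag import RagModel   # pulls in the torch code lazily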
| 132 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 216 | 0 |
import socket
def main() -> None:
    """simple docstring"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
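# For completeness, a minimal sketch of the matching server side (an added
# example, not part of the original file): it listens on the same port and
# streams a file back to the client above. The file name is a placeholder.
#
# def serve_file(filename: str = "File_to_send") -> None:
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12312))
#     server.listen(1)
#     conn, _addr = server.accept()
#     conn.recv(1024)  # consume the client's greeting
#     with open(filename, "rb") as in_file:
#         chunk = in_file.read(1024)
#         while chunk:
#             conn.send(chunk)
#             chunk = in_file.read(1024)
#     conn.close()
#     server.close()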
| 479 | import cmath
import math
def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """simple docstring"""
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
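    # Worked example (added): 100 V and 5 A, both at 0 degrees, give a purely
    # real apparent power of 500 VA.
    print(apparent_power(100, 5, 0, 0))  # -> (500+0j)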
| 479 | 1 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "torchsde"]
def __init__( self : int , *__magic_name__ : Tuple , **__magic_name__ : List[str] ) -> List[Any]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def __A ( cls : Any , *__magic_name__ : Dict , **__magic_name__ : Dict ) -> Any:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def __A ( cls : Tuple , *__magic_name__ : Any , **__magic_name__ : int ) -> Tuple:
requires_backends(cls , ["torch", "torchsde"] )
| 140 | import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
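    # Example (added): for the postfix expression "5 6 9 * +" the evaluator
    # prints its trace table and returns 5 + (6 * 9) = 59.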
| 140 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """simple docstring"""
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """simple docstring"""
        pass
| 464 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 464 | 1 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('''\n''')
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , A , A="[SEP]" , A="[SEP]" , A="[SEP]" , A="[UNK]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A = None , **A , ):
lowerCamelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , cls_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
lowerCamelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
lowerCamelCase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase_ : Optional[int] = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(1_0 ):
lowerCamelCase_ : Union[str, Any] = F"""[unused{i}]"""
lowerCamelCase_ : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase_ : Union[str, Any] = 1_2
lowerCamelCase_ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A )
def __getstate__(self ):
lowerCamelCase_ : Optional[Any] = self.__dict__.copy()
lowerCamelCase_ : Union[str, Any] = None
return state
def __setstate__(self , A ):
lowerCamelCase_ : List[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase_ : Tuple = {}
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return ([0] * len(A )) + [1]
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ (self ):
return len(self.sp_model ) + self.fairseq_offset
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ (self , A ):
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase__ (self , A ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ : Union[str, Any] = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ (self , A ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : List[str] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
lowerCamelCase_ : int = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCAmelCase__ (self , A , A = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase_ : List[Any] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
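# Hedged usage sketch (added): assuming the pretrained checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above, the tokenizer is typically loaded as:
#
#   tokenizer = XLMProphetNetTokenizer.from_pretrained(
#       "microsoft/xprophetnet-large-wiki100-cased"
#   )
#   ids = tokenizer("Hello world").input_ids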
| 422 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
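    # Worked check (added): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10,
    # both evaluators compute 5*10**2 + 9.3*10**3 + 7*10**4 = 79800 (up to
    # float rounding).
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6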
| 422 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 373 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """simple docstring"""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
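    # Small added example: for the diagonal matrix diag(2, 1) the dominant
    # eigenvalue found by power iteration is 2.
    value, _ = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
    assert abs(value - 2.0) <= 1e-6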
| 373 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
UpperCAmelCase__ : List[Any] = get_logger(__name__)
UpperCAmelCase__ : int = Path(__file__).parent / '''model_card_template.md'''
UpperCAmelCase__ : Union[str, Any] = uuida().hex
UpperCAmelCase__ : Optional[int] = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
UpperCAmelCase__ : Tuple = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
UpperCAmelCase__ : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def _lowercase ( __SCREAMING_SNAKE_CASE = None ) -> str:
UpperCamelCase__ : List[Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
ua += "; " + user_agent
return ua
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ) -> int:
if token is None:
UpperCamelCase__ : str = HfFolder.get_token()
if organization is None:
UpperCamelCase__ : List[str] = whoami(__SCREAMING_SNAKE_CASE )['name']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__SCREAMING_SNAKE_CASE , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
UpperCamelCase__ : Dict = args.hub_token if hasattr(__SCREAMING_SNAKE_CASE , 'hub_token' ) else None
UpperCamelCase__ : Any = get_full_repo_name(__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , repo_name=__SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(__SCREAMING_SNAKE_CASE , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__SCREAMING_SNAKE_CASE , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__SCREAMING_SNAKE_CASE , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__SCREAMING_SNAKE_CASE , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__SCREAMING_SNAKE_CASE , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__SCREAMING_SNAKE_CASE , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__SCREAMING_SNAKE_CASE , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__SCREAMING_SNAKE_CASE , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__SCREAMING_SNAKE_CASE , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__SCREAMING_SNAKE_CASE , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__SCREAMING_SNAKE_CASE , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
UpperCamelCase__ : Optional[Any] = os.path.join(args.output_dir , 'README.md' )
model_card.save(__SCREAMING_SNAKE_CASE )
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCamelCase__ : Tuple = str(Path(__SCREAMING_SNAKE_CASE ).as_posix() )
UpperCamelCase__ : Union[str, Any] = re.search(R'snapshots/([^/]+)/' , __SCREAMING_SNAKE_CASE )
if search is None:
return None
UpperCamelCase__ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__SCREAMING_SNAKE_CASE ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
UpperCAmelCase__ : List[Any] = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
UpperCAmelCase__ : List[str] = os.path.join(hf_cache_home, '''diffusers''')
def _lowercase ( __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ) -> None:
if new_cache_dir is None:
UpperCamelCase__ : int = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCamelCase__ : Union[str, Any] = old_diffusers_cache
UpperCamelCase__ : Dict = Path(__SCREAMING_SNAKE_CASE ).expanduser()
UpperCamelCase__ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCamelCase__ : str = new_cache_dir / old_blob_path.relative_to(__SCREAMING_SNAKE_CASE )
new_blob_path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
try:
os.symlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
UpperCAmelCase__ : Any = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
UpperCAmelCase__ : Dict = 0
else:
with open(cache_version_file) as f:
try:
UpperCAmelCase__ : Dict = int(f.read())
except ValueError:
UpperCAmelCase__ : List[str] = 0
if cache_version < 1:
UpperCAmelCase__ : Any = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
UpperCAmelCase__ : Dict = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'''the directory exists and can be written to.'''
)
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> str:
if variant is not None:
UpperCamelCase__ : Union[str, Any] = weights_name.split('.' )
UpperCamelCase__ : Union[str, Any] = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase__ : Optional[int] = '.'.join(__SCREAMING_SNAKE_CASE )
return weights_name
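# Added worked example of the variant naming implemented just above (the helper
# is referenced as `_add_variant` further down in this file):
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"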
def _lowercase ( __SCREAMING_SNAKE_CASE , *,
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , ) -> Tuple:
UpperCamelCase__ : Any = str(__SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
return pretrained_model_name_or_path
elif os.path.isdir(__SCREAMING_SNAKE_CASE ):
if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
# Load from a PyTorch checkpoint
UpperCamelCase__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse('0.20.0' )
):
try:
UpperCamelCase__ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , filename=_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __SCREAMING_SNAKE_CASE , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}' so that the correct variant file can be added.""" , __SCREAMING_SNAKE_CASE , )
try:
# 2. Load model file as usual
UpperCamelCase__ : int = hf_hub_download(
__SCREAMING_SNAKE_CASE , filename=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 410 |
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
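    # The demo above prints "Hey fellow warriors": words longer than four
    # characters ("wollef", "sroirraw") are reversed, short words are kept.
    assert reverse_long_words('''Hey wollef sroirraw''') == '''Hey fellow warriors'''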
| 410 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> int:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
a__ : str = [line.strip() for line in open(__UpperCamelCase , "r" ).readlines()]
a__ : int = []
if args.gold_data_mode == "qa":
a__ : Optional[int] = pd.read_csv(__UpperCamelCase , sep="\t" , header=__UpperCamelCase )
for answer_list in data[1]:
a__ : Optional[Any] = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
a__ : List[str] = [line.strip() for line in open(__UpperCamelCase , "r" ).readlines()]
a__ : Dict = [[reference] for reference in references]
a__ : int = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
a__ : Dict = 1_00.0 * em / total
a__ : Tuple = 1_00.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
a__ : Optional[int] = args.k
a__ : List[str] = [line.strip() for line in open(__UpperCamelCase , "r" ).readlines()]
a__ : int = [line.strip() for line in open(__UpperCamelCase , "r" ).readlines()]
a__ : Union[str, Any] = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
a__ : Optional[int] = set(hypo.split("\t" )[:k] )
a__ : str = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a__ : Dict = 1_00.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
def strip_title(__UpperCamelCase ):
if title.startswith("\"" ):
a__ : Union[str, Any] = title[1:]
if title.endswith("\"" ):
a__ : Any = title[:-1]
return title
a__ : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase , truncation=__UpperCamelCase , )["input_ids"].to(args.device )
a__ : int = rag_model.rag.question_encoder(__UpperCamelCase )
a__ : Dict = question_enc_outputs[0]
a__ : List[Any] = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
a__ : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a__ : str = []
for docs in all_docs:
a__ : Union[str, Any] = [strip_title(__UpperCamelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(__UpperCamelCase ) )
return provenance_strings
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
with torch.no_grad():
a__ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase , truncation=__UpperCamelCase )
a__ : Optional[Any] = inputs_dict.input_ids.to(args.device )
a__ : List[Any] = inputs_dict.attention_mask.to(args.device )
a__ : str = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a__ : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info("Q: {} - A: {}".format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def SCREAMING_SNAKE_CASE( ) -> int:
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__UpperCamelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=__UpperCamelCase , choices=["exact", "compressed", "legacy"] , type=__UpperCamelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=__UpperCamelCase , type=__UpperCamelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=__UpperCamelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__UpperCamelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=__UpperCamelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=__UpperCamelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=__UpperCamelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=__UpperCamelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=__UpperCamelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=__UpperCamelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=__UpperCamelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
a__ : int = parser.parse_args()
a__ : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Tuple:
a__ : Union[str, Any] = {}
if args.model_type is None:
a__ : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
a__ : str = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
a__ : int = args.n_docs
if args.index_name is not None:
a__ : int = args.index_name
if args.index_path is not None:
a__ : str = args.index_path
else:
a__ : List[Any] = BartForConditionalGeneration
a__ : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , __UpperCamelCase )
a__ : Tuple = get_scores if args.eval_mode == "e2e" else get_precision_at_k
a__ : Tuple = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(__UpperCamelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
a__ : List[Any] = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
a__ : Optional[int] = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
a__ : str = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
a__ : List[str] = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
a__ : int = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("\n".join(__UpperCamelCase ) + "\n" )
preds_file.flush()
a__ : str = []
if len(__UpperCamelCase ) > 0:
a__ : Dict = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("\n".join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase = get_args()
main(args)
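# Illustrative invocation (added; the script file name and data paths are
# placeholders, the flags are the ones defined by the argparse block above):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --evaluation_set questions.txt \
#       --gold_data_path gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e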
| 703 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :Optional[int] = "naver-clova-ix/donut-base-finetuned-docvqa"
A :Union[str, Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
A :Any = "document_qa"
A :List[str] = AutoProcessor
A :Tuple = VisionEncoderDecoderModel
A :str = ["image", "text"]
A :int = ["text"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : str = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a__ : int = task_prompt.replace("{user_input}" , __UpperCAmelCase )
a__ : Any = self.pre_processor.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt" ).input_ids
a__ : int = self.pre_processor(__UpperCAmelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=__UpperCAmelCase,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=__UpperCAmelCase,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=__UpperCAmelCase,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
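# Minimal usage sketch (not part of the original file; the image path and
# question are hypothetical, and Pillow must be installed):
#
#   from PIL import Image
#
#   tool = DocumentQuestionAnsweringTool()
#   document = Image.open("invoice.png")
#   print(tool(document, "What is the total amount?"))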
| 207 | 0 |
def solution(n: int = 1000) -> int:
    """
    Count the fractions in the continued-fraction expansion of sqrt(2) whose
    numerator has more digits than the denominator, over the first ``n``
    expansions (Project Euler problem 57).

    >>> solution(14)
    2
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
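# The first expansions of sqrt(2) are 3/2, 7/5, 17/12, 41/29, ...; the eighth,
# 1393/985, is the first whose numerator has more digits than its denominator.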
if __name__ == "__main__":
print(F'''{solution() = }''')
| 148 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 584 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import * | 69 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of a 0-to-9 pandigital tuple."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
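# For example, 1406357289 is 0-to-9 pandigital and has the property: 406 is
# divisible by 2, 063 by 3, 635 by 5, 357 by 7, 572 by 11, 728 by 13, and 289
# by 17 (Project Euler problem 43).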
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Initialize the matrix with the given size and default value."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Check if the given indices are valid to pick an element from the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        # Validation
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Apply the Sherman-Morrison formula in O(n^2)."""
        # Validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
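# The method above relies on the Sherman-Morrison identity: when A is invertible
# and 1 + v^T A^(-1) u != 0,
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
# The matrix the method is called on plays the role of A^(-1), which is why the
# update stays O(n^2) instead of requiring a fresh O(n^3) inversion.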
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
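# The four scans (right, down, down-right diagonal, down-left diagonal) cover
# every straight line of four adjacent grid numbers, so the running maximum is
# the answer to Project Euler problem 11.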
if __name__ == "__main__":
print(solution())
| 490 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
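# Example invocation (the paths below are hypothetical):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan-tf-checkpoint --output ./gptsan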
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 396 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once in ``string``."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
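# Example (not in the original file): is_isogram("Uncopyrightable") is True,
# while is_isogram("letter") is False because "t" and "e" repeat.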
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)

    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 396 | 1 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of ``n`` in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(2560)
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
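# Trial division only needs candidates up to sqrt(n), so the loop above runs in
# O(sqrt(n)) time in the worst case (when n itself is prime).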
if __name__ == "__main__":
import doctest
doctest.testmod()
| 501 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
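# Each triangle() call draws one outline and recurses three times, so a fractal
# of depth d draws (3 ** (d + 1) - 1) / 2 triangle outlines in total.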
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 501 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 187 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
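# For example, with shortest_edge == 18 a 30x60 input (h=30, w=60) resizes to
# (expected_height, expected_width) == (18, 36): the short side is pinned to 18
# and the long side scales by the same ratio.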
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
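The (1, 3, 800, 1066) pixel-value shape asserted above comes from DETR-style aspect-ratio-preserving resizing: the shortest edge is scaled to 800 unless that would push the longest edge past 1333. A minimal sketch of that rule; the helper name and exact rounding are illustrative, not the library's internal code:

def detr_style_target_size(height: int, width: int, shortest_edge: int = 800, longest_edge: int = 1333):
    scale = shortest_edge / min(height, width)
    if max(height, width) * scale > longest_edge:
        scale = longest_edge / max(height, width)
    return int(height * scale), int(width * scale)

# The 480x640 COCO image above scales by 800/480, which yields exactly (800, 1066).
assert detr_style_target_size(480, 640) == (800, 1066)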
| 645 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
lowercase_ = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
lowercase_ = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
lowercase_ = BeautifulSoup(res.text, """html.parser""")
lowercase_ = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
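The script above fires a single unchecked GET and then parses whatever comes back. A hedged variant of the fetch step, with fail-fast error handling; the function and parameter names are illustrative, not part of the original script:

import requests
from fake_useragent import UserAgent


def fetch_search_page(query: str) -> str:
    # Bound the request time and surface 4xx/5xx instead of parsing an error page.
    response = requests.get(
        "https://www.google.com/search",
        params={"q": query},
        headers={"UserAgent": UserAgent().random},
        timeout=10,
    )
    response.raise_for_status()
    return response.text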
| 706 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x ** y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 37 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
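A standalone shape check for the pyramid pooling module above (the channel counts are illustrative, not from the UperNet configs): each block pools the map to scale x scale, projects it to `channels`, and upsamples back to the input's spatial size, so all outputs align for concatenation.

ppm = UperNetPyramidPoolingModule(pool_scales=(1, 2, 3, 6), in_channels=64, channels=32, align_corners=False)
feature_map = torch.randn(2, 64, 16, 16)
outs = ppm(feature_map)
print([tuple(o.shape) for o in outs])  # four tensors, each (2, 32, 16, 16)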
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
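End-to-end usage sketch for the model above; the checkpoint name mirrors the archive list at the top of the file, and the image path is a placeholder:

import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("scene.jpg")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# logits: (batch, num_labels, height, width) -> per-pixel class map via argmax
segmentation = outputs.logits.argmax(dim=1)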
| 75 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
_SCREAMING_SNAKE_CASE = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
# fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
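Usage sketch for the feature extractor exercised above: it walks the DOM with BeautifulSoup and returns the visible text nodes alongside their XPaths, which downstream MarkupLM models consume as paired inputs.

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor("<html><body><h1>Hello</h1><p>world</p></body></html>")
print(encoding.nodes)   # [['Hello', 'world']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]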
| 591 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
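Usage sketch for the processor under test: Donut resizes with thumbnail semantics (aspect ratio preserved, never upscaled past the target) and then pads to a fixed canvas, so every image in a batch comes out at the configured size. The size values mirror the tester above.

from PIL import Image
from transformers import DonutImageProcessor

image_processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = Image.new("RGB", (400, 300))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 20])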
| 720 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
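A standalone shape check for the encoder above (not part of the module itself; the Namespace stands in for the real training args, and constructing the encoder downloads ResNet-152 weights): with num_image_embeds=3 the pooling grid is (3, 1), so each image becomes 3 image "tokens" of width 2048.

import argparse

args = argparse.Namespace(num_image_embeds=3)
encoder = ImageEncoder(args)
images = torch.randn(2, 3, 224, 224)
print(encoder(images).shape)  # torch.Size([2, 3, 2048])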
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
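Putting the pieces together (a sketch; the tokenizer checkpoint and file path are placeholders): JsonlDataset feeds collate_fn, which pads sentences to the batch maximum and stacks the image tensors, yielding the six tensors the MMBT model expects.

from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=80)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
text, mask, img, img_start, img_end, tgt = next(iter(loader))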
| 516 | 0 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Finds a root of `function` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
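Worked check: x^3 - 2x - 5 is the classic root-finding test polynomial with real root approximately 2.0945515, and the secant iteration from the starting points 3 and 3.5 converges to it.

root = intersection(f, 3, 3.5)
print(round(root, 5))        # ~2.09455
print(abs(f(root)) < 1e-3)   # True: the residual at the returned point is tiny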
| 215 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
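The argv-patching pattern used above, distilled into a reusable helper (names are illustrative): any script exposing main() can be driven in-process by patching sys.argv, which keeps assertions and coverage in the same interpreter as the run.

import sys
from unittest.mock import patch


def run_script_main(script_module, argv):
    # Prepend a dummy program name, exactly as the test above does.
    with patch.object(sys, "argv", ["script.py"] + argv):
        return script_module.main()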
| 215 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
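Minimal illustration of what the _LazyModule indirection buys (a sketch, not the transformers implementation): attribute access triggers the submodule import on first use, so importing the package stays cheap even when heavy optional backends are installed.

import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first touched.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)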
| 327 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
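Inference sketch matching the integration test above, classifying one image with the distilled (teacher) LeViT head; "facebook/levit-128S" mirrors the first entry of the pretrained archive list:

import torch
from PIL import Image
from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(model.config.id2label[outputs.logits.argmax(-1).item()])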
| 327 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
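Worked examples following directly from the formula above: a repeated real root collapses to its real part, while a negative discriminant yields a complex conjugate pair.

print(quadratic_roots(1, -2, 1))  # (1.0, 1.0): x^2 - 2x + 1 has the double root 1
print(quadratic_roots(1, 0, 1))   # (1j, -1j): x^2 + 1 has a purely imaginary pair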
| 173 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
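Sanity check for the three variants, easy to verify by hand: 262144 = 2**18, and 2 + 6 + 2 + 1 + 4 + 4 = 19.

assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19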
| 173 | 1 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
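Worked example with illustrative values: given conductivity and electron concentration, the function solves sigma = q * n * mu for the remaining zeroed-out quantity, here mobility.

name, value = carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
print(name, value)  # 'mobility', i.e. 25 / (100 * 1.6021e-19)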
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 307 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]="resnet50" , __lowerCamelCase : Any=3 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : str=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=True , ):
"""simple docstring"""
_snake_case = parent
_snake_case = out_indices if out_indices is not None else [4]
_snake_case = stage_names
_snake_case = out_features
_snake_case = backbone
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = use_pretrained_backbone
_snake_case = is_training
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest ( ModelTesterMixin,BackboneTesterMixin,PipelineTesterMixin,unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self ):
"""simple docstring"""
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
    def test_feed_forward_chunking( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
    def test_hidden_states_output( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
    def test_initialization( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
    def test_from_pretrained_no_checkpoint( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_save_load( self ):
        """simple docstring"""
        pass
    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def test_tie_model_weights( self ):
        """simple docstring"""
        pass
    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def test_tied_model_weights_key_ignore( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_load_save_without_tied_weights( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_model_weights_reload_no_save( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
    def test_channels( self ):
        """simple docstring"""
        pass
    @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
    def test_torchscript_output_attentions( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Safetensors is not supported by timm.''' )
    def test_can_use_safetensors( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 103 |
"""simple docstring"""
def greatest_common_divisor( x : int , y : int ):
    """Euclid's algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y , x % y )
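# lcm(x, y) * gcd(x, y) == x * y for positive integers, so the LCM follows directly from the GCD.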
def lcm( x : int , y : int ):
    """Least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x , y )
def solution( n : int = 20 ):
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
| 616 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] =field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] =field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] =field(
        default=None , metadata={'help': 'The column name of the images in the files.'} )
    train_dir: Optional[str] =field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] =field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] =field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ) -> None:
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] =field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        } , )
    config_name: Optional[str] =field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    config_overrides: Optional[str] =field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] =field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str =field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: Optional[str] =field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool =field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    mask_ratio: float =field(
        default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    norm_pix_loss: bool =field(
        default=True , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class CustomTrainingArguments( TrainingArguments ):
    base_learning_rate: float =field(
        default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    return {"pixel_values": pixel_values}
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split )
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        model = ViTMAEForPreTraining(config )
if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(size , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    def preprocess_images(examples ):
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
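        # Linear scaling rule: the absolute learning rate grows with the effective batch size relative to 256.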
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 461 | '''simple docstring'''
def solution( pence : int = 200 ) -> int:
    '''Count the ways to make `pence` from standard UK coins (Project Euler 31).'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
| 461 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
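# Import structure is resolved lazily so that torch is only imported when a model class is actually used.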
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 46 |
'''simple docstring'''
from math import isqrt
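# Project Euler 187: count the composites below the limit that have exactly two prime factors.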
def calculate_prime_numbers( max_number : int ) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number : int = 1_0**8 ) -> int:
    """Count semiprimes below max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin :
"""simple docstring"""
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"""There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ):
        input_ids = reader_input["""input_ids"""]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""] | 710 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
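# U-Net hyperparameters keyed by checkpoint family: the 32x32 config below backs the "test" checkpoints,
# followed by the ImageNet-64 and LSUN-256 variants (see the selection logic under __main__).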
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool( v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("""boolean value expected""" )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    new_checkpoint[f"""{new_prefix}.norm1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[f"""{new_prefix}.conv1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[f"""{new_prefix}.norm2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[f"""{new_prefix}.conv2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
    if has_skip:
        new_checkpoint[f"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[f"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    weight_q , weight_k , weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
    new_checkpoint[f"""{new_prefix}.group_norm.weight"""] = checkpoint[f"""{old_prefix}.norm.weight"""]
    new_checkpoint[f"""{new_prefix}.group_norm.bias"""] = checkpoint[f"""{old_prefix}.norm.bias"""]
    new_checkpoint[f"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f"""{new_prefix}.to_out.0.bias"""] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
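# Walks the OpenAI consistency-model state dict and remaps every down/mid/up block into diffusers' UNet2DModel layout.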
def con_pt_to_diffuser( unet_path , unet_config ):
    checkpoint = torch.load(unet_path , map_location="""cpu""" )
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["""class_embedding.weight"""] = checkpoint["""label_emb.weight"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    down_block_types = unet_config["""down_block_types"""]
    layers_per_block = unet_config["""layers_per_block"""]
    attention_head_dim = unet_config["""attention_head_dim"""]
    channels_list = unet_config["""block_out_channels"""]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f"""down_blocks.{i}.attentions.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = f"""down_blocks.{i}.downsamplers.0"""
            old_prefix = f"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = """mid_block.resnets.0"""
    old_prefix = """middle_block.0"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = """mid_block.attentions.0"""
    old_prefix = """middle_block.1"""
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = """mid_block.resnets.1"""
    old_prefix = """middle_block.2"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = f"""up_blocks.{i}.attentions.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 383 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
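# Integration tests: each case runs the full pipeline on GPU and compares a small corner slice of the
# generated image against reference values.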
@slow
@require_torch_gpu
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 57 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
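# Composable image pipeline: resize -> center crop -> rescale -> normalize, mirroring standard ImageNet preprocessing.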
class A ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , crop_size : Dict[str, int] = None , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
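# A minimal usage sketch for the processor restored above (the class name
# "ImageProcessor" is a placeholder introduced here; shapes are illustrative).
if __name__ == "__main__":
    import numpy as np

    processor = ImageProcessor(size={"height": 224, "width": 224})
    dummy = (np.random.rand(480, 640, 3) * 255).astype("uint8")  # one HWC image
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop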
| 22 | 0 |
"""Tests for the `transformers` logging utilities."""
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


# NOTE: the class and method names were anonymized in the source; the names
# used here are descriptive reconstructions.
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
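# A short usage sketch of the verbosity API exercised by the tests above
# (safe to run anywhere transformers is installed; output values are the
# standard logging level integers):
if __name__ == "__main__":
    from transformers import logging as hf_logging

    hf_logging.set_verbosity_error()  # silence warnings from all transformers.* loggers
    print(hf_logging.get_verbosity())  # 40 == logging.ERROR
    hf_logging.set_verbosity_warning()  # restore the library default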
| 704 |
"""Evaluate an automatic-speech-recognition model on a dataset and log WER/CER."""
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
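# Example invocation of the evaluation script above (a sketch; the model and
# dataset identifiers are illustrative, not fixed by the source):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 \
#       --config en --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs
#
# `normalize_text` lower-cases, strips the ignored characters and collapses
# whitespace, e.g. normalize_text("Hello, World!") == "hello world".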
| 542 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """▁"""
_lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowerCAmelCase = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1_0_2_4,
}
# fmt: off
_lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class __UpperCamelCase ( _lowercase ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = ["input_ids", "attention_mask"]
_UpperCAmelCase = []
_UpperCAmelCase = []
def __init__( self ,_A ,_A=None ,_A=None ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token
_lowerCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : List[Any] = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase ,tgt_lang=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCAmelCase ,)
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
_lowerCAmelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : List[Any] = len(self.sp_model )
_lowerCAmelCase : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCAmelCase )
}
_lowerCAmelCase : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase : Tuple = src_lang if src_lang is not None else '''en_XX'''
_lowerCAmelCase : Optional[int] = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.__dict__.copy()
_lowerCAmelCase : List[Any] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.encode(__lowerCAmelCase ,out_type=__lowerCAmelCase )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : Dict = self.sp_model.PieceToId(__lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Any = ''''''
_lowerCAmelCase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowerCAmelCase : Dict = True
_lowerCAmelCase : str = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
__lowerCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase ,'wb' ) as fi:
_lowerCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase ,token_ids_a=__lowerCAmelCase ,already_has_special_tokens=__lowerCAmelCase )
_lowerCAmelCase : int = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : int = src_lang
_lowerCAmelCase : Tuple = self(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCAmelCase : int = self.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = tgt_lang_id
return inputs
def __lowerCamelCase ( self ,_A ,_A = "en_XX" ,_A = None ,_A = "ro_RO" ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = src_lang
_lowerCAmelCase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.lang_code_to_id[src_lang]
_lowerCAmelCase : Tuple = [self.cur_lang_code_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase : List[Any] = [self.cur_lang_code_id]
_lowerCAmelCase : Any = [self.eos_token_id]
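# A minimal usage sketch for the tokenizer above (requires downloading a
# checkpoint; the identifiers below are illustrative):
#
#   tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   enc = tok("Hello world", return_tensors="pt")
#   # input_ids start with the en_XX language code and end with </s>
#   tgt_id = tok.lang_code_to_id["ro_RO"]  # pass as forced_bos_token_id to generate()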
| 259 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_ ( A_ : Dict, A_ : bool = True, A_ : float = math.inf, A_ : float = -math.inf, A_ : float = math.inf, A_ : float = -math.inf, A_ : bool = False, A_ : float = 1_00, A_ : float = 0.01, A_ : float = 1, ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = search_prob
_lowerCamelCase : str = start_temperate
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = 0
_lowerCamelCase : Any = None
while not search_end:
_lowerCamelCase : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCamelCase : Tuple = current_state
scores.append(A_ )
iterations += 1
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_lowerCamelCase : List[Any] = random.randint(0, len(A_ ) - 1 ) # picking a random neighbor
_lowerCamelCase : Dict = neighbors.pop(A_ )
_lowerCamelCase : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCamelCase : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCamelCase : Optional[Any] = picked_neighbor
else:
_lowerCamelCase : Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCamelCase : Union[str, Any] = picked_neighbor
_lowerCamelCase : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCamelCase : Tuple = True
else:
_lowerCamelCase : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ), A_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
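# The Metropolis acceptance rule used above, in isolation (a sketch): a move
# that worsens the score by `change` at temperature `current_temp` is accepted
# with probability e^(change / current_temp), so worse moves become rarer as
# the temperature decays.
if __name__ == "__main__":
    import math

    for temp in (100, 10, 1):
        p = math.e ** (-5 / temp)  # a move that is worse by 5
        print(f"T={temp}: accept probability {p:.3f}")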
| 83 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a ( UpperCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = (DEISMultistepScheduler,)
__UpperCAmelCase = (("""num_inference_steps""", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def __magic_name__ ( self : Optional[int] , snake_case_ : Optional[int]=0 , **snake_case_ : str ):
'''simple docstring'''
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop('''num_inference_steps''' , snake_case_ )
snake_case__ : str = self.dummy_sample
snake_case__ : str = 0.1 * sample
snake_case__ : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case__ : str = self.get_scheduler_config(**snake_case_ )
snake_case__ : Union[str, Any] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
snake_case__ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
snake_case__ : Optional[int] = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
snake_case__ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ : List[Any] = sample, sample
for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ):
snake_case__ : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : str = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Any ):
'''simple docstring'''
pass
def __magic_name__ ( self : Optional[int] , snake_case_ : List[str]=0 , **snake_case_ : Optional[Any] ):
'''simple docstring'''
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : List[str] = kwargs.pop('''num_inference_steps''' , snake_case_ )
snake_case__ : Union[str, Any] = self.dummy_sample
snake_case__ : int = 0.1 * sample
snake_case__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case__ : Tuple = self.get_scheduler_config()
snake_case__ : Union[str, Any] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
snake_case__ : List[Any] = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : Union[str, Any] = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a fresh scheduler when none was passed in; the source
        # rebuilt it unconditionally, silently discarding the argument.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : List[Any] = dict(self.forward_default_kwargs )
snake_case__ : Dict = kwargs.pop('''num_inference_steps''' , snake_case_ )
for scheduler_class in self.scheduler_classes:
snake_case__ : Optional[Any] = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**snake_case_ )
snake_case__ : str = self.dummy_sample
snake_case__ : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case_ , '''set_timesteps''' ):
scheduler.set_timesteps(snake_case_ )
elif num_inference_steps is not None and not hasattr(snake_case_ , '''set_timesteps''' ):
snake_case__ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
snake_case__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
snake_case__ : Dict = scheduler.timesteps[5]
snake_case__ : List[str] = scheduler.timesteps[6]
snake_case__ : List[str] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives identical results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type='''deis''' , solver_order=snake_case_ , solver_type=snake_case_ , )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
snake_case__ : List[Any] = self.full_loop(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers"
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=snake_case_ )
self.check_over_configs(lower_order_final=snake_case_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : List[Any] = self.full_loop()
snake_case__ : Optional[Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
snake_case__ : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
snake_case__ : str = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.scheduler_classes[0]
snake_case__ : Tuple = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 )
snake_case__ : str = scheduler_class(**snake_case_ )
snake_case__ : List[Any] = 1_0
snake_case__ : str = self.dummy_model()
snake_case__ : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : int = model(snake_case_ , snake_case_ )
snake_case__ : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
assert sample.dtype == torch.floataa
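# A minimal usage sketch of the scheduler exercised above, outside the test
# harness (assumes only diffusers and torch; shapes and values are illustrative):
if __name__ == "__main__":
    import torch
    from diffusers import DEISMultistepScheduler

    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for a UNet's noise prediction
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])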
| 719 |
"""Serving command for the transformers CLI, exposing a pipeline over REST."""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenization result."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """De-tokenization result."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward-pass result."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input, optionally mapping tokens to their ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids back into a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run a forward pass through the pipeline."""
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
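# A sketch of talking to the server above once it is running; the endpoint
# names come from the routes registered in __init__, the JSON keys from the
# reconstructed handler signatures, and host/port are illustrative:
#
#   transformers-cli serve --task sentiment-analysis --host 127.0.0.1 --port 8888
#
if __name__ == "__main__":
    import requests  # assumed available; not imported by the server module

    r = requests.post("http://127.0.0.1:8888/tokenize", json={"text_input": "Hello world", "return_ids": True})
    print(r.json())  # {"tokens": [...], "tokens_ids": [...]}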
| 502 | 0 |
"""simple docstring"""
lowerCamelCase = 9.80_665
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
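# Worked example for the formula above: a 0.02 m^3 object fully submerged in
# fresh water (rho = 1000 kg/m^3) under standard gravity displaces
# 1000 * 9.80665 * 0.02, i.e. about 196.13 N of fluid weight.
if __name__ == "__main__":
    print(f"{archimedes_principle(fluid_density=1000, volume=0.02):.2f} N")  # 196.13 N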
| 82 |
"""Recursive insertion sort."""
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first `n` elements of `collection` in ascending order, in place."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Insert `collection[index - 1]` into its sorted position."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
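# A quick non-interactive self-check (O(n^2) comparisons overall; recursion
# depth is O(n), so this variant suits small lists):
#
#   data = [5, 3, 1, 4, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3, 4, 5]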
| 98 | 0 |
"""Check that every op in a TensorFlow SavedModel is supported by a given ONNX opset."""
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
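# Example invocation of the checker above (a sketch; the saved-model path is
# illustrative and must point at a TensorFlow SavedModel .pb file):
#
#   python check_ops.py --saved_model_path /tmp/my_model/saved_model.pb --opset 12 --strict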
| 700 |
"""Launchers for starting training functions from notebooks."""
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in the current environment
    (TPU with multiple cores, for instance).
    """
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launches a training function using several processes on CPU, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
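# A minimal usage sketch for notebook_launcher (run from a notebook cell; the
# training function and process count are illustrative):
#
#   def training_loop():
#       ...  # build the Accelerator, model and dataloaders *inside* this function
#
#   notebook_launcher(training_loop, args=(), num_processes=2)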
| 656 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
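# With the lazy-module wiring above, submodules are only imported on first
# attribute access, e.g. (checkpoint identifier is illustrative):
#
#   from transformers import SqueezeBertTokenizer
#   tok = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased")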
| 363 | """simple docstring"""
"""GLPN model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
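# A minimal usage sketch for the config above (defaults mirror the published
# GLPN architecture; no weights are downloaded):
if __name__ == "__main__":
    config = GLPNConfig(drop_path_rate=0.2)
    print(config.hidden_sizes)  # [32, 64, 160, 256]
    print(config.model_type)    # "glpn"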
| 363 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCamelCase = "Create a default config file for Accelerate with only a few flags set."
def __magic_name__ ( SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE = default_json_config_file , SCREAMING_SNAKE_CASE = False ) -> str:
_lowercase : str = Path(SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_lowercase : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_lowercase : List[str] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_lowercase : Any = torch.cuda.device_count()
_lowercase : Union[str, Any] = num_gpus
_lowercase : Tuple = False
if num_gpus > 1:
_lowercase : str = 'MULTI_GPU'
else:
_lowercase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
_lowercase : int = torch.xpu.device_count()
_lowercase : int = num_xpus
_lowercase : Optional[Any] = False
if num_xpus > 1:
_lowercase : Union[str, Any] = 'MULTI_XPU'
else:
_lowercase : Optional[int] = 'NO'
elif is_npu_available():
_lowercase : Optional[Any] = torch.npu.device_count()
_lowercase : Optional[Any] = num_npus
_lowercase : Union[str, Any] = False
if num_npus > 1:
_lowercase : Union[str, Any] = 'MULTI_NPU'
else:
_lowercase : int = 'NO'
else:
_lowercase : List[Any] = 0
_lowercase : Any = True
_lowercase : Union[str, Any] = 1
_lowercase : List[Any] = 'NO'
_lowercase : Tuple = ClusterConfig(**SCREAMING_SNAKE_CASE )
config.to_json_file(SCREAMING_SNAKE_CASE )
return path
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Union[str, Any] = parser.add_parser('default' , parents=SCREAMING_SNAKE_CASE , help=SCREAMING_SNAKE_CASE , formatter_class=SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
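
# Follow-up sketch (hypothetical column use, not in the original script): the
# `ratio_char_token` column stores characters-per-token, so after the map you
# could flag near-binary or pathological files with an unusually low ratio:
#
#   suspicious = ds.filter(lambda example: example["ratio_char_token"] < 2.0)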
| 677 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = SwinConfig()
_lowerCAmelCase = swin_name.split("_" )
_lowerCAmelCase = name_split[1]
_lowerCAmelCase = int(name_split[4] )
_lowerCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 6, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_lowerCAmelCase = 128
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (4, 8, 16, 32)
else:
_lowerCAmelCase = 192
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
_lowerCAmelCase = 21841
else:
_lowerCAmelCase = 1000
_lowerCAmelCase = "huggingface/label-files"
_lowerCAmelCase = "imagenet-1k-id2label.json"
_lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = img_size
_lowerCAmelCase = num_classes
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
return config
def __a(SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
_lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
_lowerCAmelCase = "encoder." + name
if "attn.proj" in name:
_lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_lowerCAmelCase = name.replace("attn" , "attention.self" )
if "norm1" in name:
_lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
_lowerCAmelCase = "layernorm.weight"
if name == "norm.bias":
_lowerCAmelCase = "layernorm.bias"
if "head" in name:
_lowerCAmelCase = name.replace("head" , "classifier" )
else:
_lowerCAmelCase = "swin." + name
return name
def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "mask" in key:
continue
elif "qkv" in key:
_lowerCAmelCase = key.split("." )
_lowerCAmelCase = int(key_split[1] )
_lowerCAmelCase = int(key_split[3] )
_lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[
dim : dim * 2, :
]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[
:dim
]
_lowerCAmelCase = val[
dim : dim * 2
]
_lowerCAmelCase = val[
-dim:
]
else:
_lowerCAmelCase = val
return orig_state_dict
def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
_lowerCAmelCase = get_swin_config(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = SwinForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCAmelCase = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
_lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
_lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
_lowerCAmelCase = timm_model(inputs["pixel_values"] )
_lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 18 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase__ ( __UpperCamelCase , unittest.TestCase ):
__UpperCAmelCase = MvpTokenizer
__UpperCAmelCase = MvpTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = filter_roberta_detectors
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase : Optional[Any] = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase : int = {"""unk_token""": """<unk>"""}
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case ) )
def _UpperCAmelCase ( self , **snake_case ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def _UpperCAmelCase ( self , **snake_case ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def _UpperCAmelCase ( self , snake_case ) -> Optional[int]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase : Dict = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="""pt""" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
lowercase : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Any = tokenizer(snake_case , padding=snake_case , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , snake_case )
self.assertIn("""attention_mask""" , snake_case )
self.assertNotIn("""labels""" , snake_case )
self.assertNotIn("""decoder_attention_mask""" , snake_case )
@require_torch
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : int = tokenizer(text_target=snake_case , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : str = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=snake_case , truncation=snake_case , return_tensors="""pt""" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : List[Any] = ["""A long paragraph for summarization."""]
lowercase : List[str] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : List[str] = tokenizer(snake_case , text_target=snake_case , return_tensors="""pt""" )
lowercase : Union[str, Any] = inputs["""input_ids"""]
lowercase : Optional[Any] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase : Optional[Any] = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase : str = """A, <mask> AllenNLP sentence."""
lowercase : int = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
lowercase : str = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 607 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
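
# Behavior sketch: with _LazyModule installed in sys.modules, submodules are only
# imported on first attribute access, so the config import stays cheap while the
# model import pulls in the torch backend:
#
#   from transformers.models.resnet import ResNetConfig  # no torch import yet
#   from transformers.models.resnet import ResNetModel   # triggers the torch-backed module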
| 702 |
import os
from pathlib import Path


def load_cuda_kernels():
    """Compile and load the custom multi-scale deformable attention kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
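

# Caching sketch (an assumption, not part of the original file): JIT-compiling the
# extension is slow, so callers typically compile once and reuse the handle,
# falling back to a pure-PyTorch path when no CUDA toolchain is available.
_MSDA_CACHE = None


def get_cuda_kernels():
    """Compile the kernels on first use and cache the module handle (None on failure)."""
    global _MSDA_CACHE
    if _MSDA_CACHE is None:
        try:
            _MSDA_CACHE = load_cuda_kernels()
        except Exception:  # e.g. missing nvcc or headers on this machine
            _MSDA_CACHE = False
    return _MSDA_CACHE or None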
| 5 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by how many genes match the target at the same position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of the child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pick random partners for `parent_1` and breed mutated children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
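
# Reproducibility sketch: the module seeds `random` with a random value at import
# time, so fix the seed yourself before calling basic() to make runs comparable:
#
#   random.seed(42)
#   basic("HELLO WORLD", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ"))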
| 114 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = '▁'
_lowerCamelCase = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
_lowerCamelCase = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
_lowerCamelCase = {
'facebook/s2t-small-librispeech-asr': 1024,
}
_lowerCamelCase = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
_lowerCamelCase = {'mustc': MUSTC_LANGS}
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = MAX_MODEL_INPUT_SIZES
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = []
def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="<pad>" , a__="<unk>" , a__=False , a__=False , a__=None , a__=None , a__ = None , **a__ , ):
"""simple docstring"""
_lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , do_upper_case=a__ , do_lower_case=a__ , tgt_lang=a__ , lang_codes=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_lowerCamelCase : Optional[int] = do_upper_case
_lowerCamelCase : Optional[Any] = do_lower_case
_lowerCamelCase : Tuple = load_json(a__)
_lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : Tuple = spm_file
_lowerCamelCase : Any = load_spm(a__ , self.sp_model_kwargs)
if lang_codes is not None:
_lowerCamelCase : List[Any] = lang_codes
_lowerCamelCase : List[str] = LANGUAGES[lang_codes]
_lowerCamelCase : Any = [F"""<lang:{lang}>""" for lang in self.langs]
_lowerCamelCase : Optional[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
_lowerCamelCase : List[str] = self.lang_tokens
_lowerCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang)
else:
_lowerCamelCase : Any = {}
@property
def __snake_case ( self):
"""simple docstring"""
return len(self.encoder)
@property
def __snake_case ( self):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Any = new_tgt_lang
self.set_tgt_lang_special_tokens(a__)
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.lang_code_to_id[tgt_lang]
_lowerCamelCase : Any = [lang_code_id]
def __snake_case ( self , a__):
"""simple docstring"""
return self.sp_model.encode(a__ , out_type=a__)
def __snake_case ( self , a__):
"""simple docstring"""
return self.encoder.get(a__ , self.encoder[self.unk_token])
def __snake_case ( self , a__):
"""simple docstring"""
return self.decoder.get(a__ , self.unk_token)
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_lowerCamelCase : List[Any] = self.sp_model.decode(a__)
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_lowerCamelCase : Optional[int] = []
else:
current_sub_tokens.append(a__)
_lowerCamelCase : Tuple = self.sp_model.decode(a__)
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __snake_case ( self , a__ , a__=None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __snake_case ( self , a__ , a__ = None , a__ = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__)
_lowerCamelCase : Tuple = [1] * len(self.prefix_tokens)
_lowerCamelCase : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a__)) + suffix_ones
return prefix_ones + ([0] * len(a__)) + ([0] * len(a__)) + suffix_ones
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.__dict__.copy()
_lowerCamelCase : str = None
return state
def __setstate__( self , a__):
"""simple docstring"""
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Dict = load_spm(self.spm_file , self.sp_model_kwargs)
def __snake_case ( self , a__ , a__ = None):
"""simple docstring"""
_lowerCamelCase : str = Path(a__)
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_lowerCamelCase : Any = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_lowerCamelCase : Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , a__)
if os.path.abspath(self.spm_file) != os.path.abspath(a__) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , a__)
elif not os.path.isfile(self.spm_file):
with open(a__ , '''wb''') as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(a__)
return (str(a__), str(a__))
def __UpperCAmelCase( lowercase_ , lowercase_ ):
_lowerCamelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**lowercase_ )
spm.Load(str(lowercase_ ) )
return spm
def __UpperCAmelCase( lowercase_ ):
with open(lowercase_ , '''r''' ) as f:
return json.load(lowercase_ )
def __UpperCAmelCase( lowercase_ , lowercase_ ):
with open(lowercase_ , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ , indent=2 )
| 114 | 1 |
"""Project Euler 31: count the ways to make `n` pence from standard UK coins."""


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
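

# Cross-check sketch (not in the original recursive solution): the same count via
# bottom-up dynamic programming, O(len(coins) * pence) instead of deep recursion.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence  # ways[0] = 1: the empty way to make 0p
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]  # solution_dp() == solution() == 73682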
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = JukeboxTokenizer
__lowerCamelCase : Dict = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def a__ (self ) -> Dict:
"""simple docstring"""
import torch
_a = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
_a = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_a = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def a__ (self ) -> Optional[int]:
"""simple docstring"""
import torch
_a = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
_a = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_a = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 11 |
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
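

# A simple cross-check for the bit-clearing loop above (an alternative sketch,
# not part of the original file):
def count_set_bits_via_bin(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    return bin(number).count("1")
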
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
def trapezoidal_rule(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
    main()
| 306 |
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a submerged body: F = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
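

# Worked example (SI units assumed): a fully submerged 0.5 m^3 body in water
# (rho = 1000 kg/m^3) feels F = 1000 * 9.80665 * 0.5 ≈ 4903.3 N upward:
#
#   >>> round(archimedes_principle(fluid_density=1000, volume=0.5), 1)
#   4903.3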
if __name__ == "__main__":
import doctest
# run doctest
    doctest.testmod()
| 306 | 1 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 499 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
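

# Usage sketch: criteria compose via StoppingCriteriaList, and generation stops as
# soon as any member fires (tensor arguments here are illustrative):
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)]
#   )
#   should_stop = criteria(input_ids, scores)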
| 499 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase : Union[str, Any] =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , *_UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : Union[str, Any]) ->int:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
_lowerCamelCase : Optional[Any] = eval_examples
_lowerCamelCase : Dict = post_process_function
_lowerCamelCase : int = quant_trainer_args
_lowerCamelCase : Union[str, Any] = 128 # default number of calibration samples
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[int]=None) ->Optional[int]:
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""")
_lowerCamelCase : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
_lowerCamelCase : str = self._remove_unused_columns(_UpperCamelCase , description="""Calibration""")
return DataLoader(
_UpperCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
_lowerCamelCase : Optional[int] = self.get_calib_dataloader(_UpperCamelCase)
_lowerCamelCase : List[str] = self.model
quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args , calib=_UpperCamelCase)
model.eval()
quant_trainer.enable_calibration(_UpperCamelCase)
logger.info("""***** Running calibration *****""")
logger.info(F""" Num examples = {self.calib_num}""")
logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(_UpperCamelCase):
# Prediction step
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = self.prediction_step(_UpperCamelCase , _UpperCamelCase , prediction_loss_only=_UpperCamelCase)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_UpperCamelCase , self.quant_trainer_args)
_lowerCamelCase : Any = model
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : str = "eval") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
_lowerCamelCase : Dict = self.get_eval_dataloader(_UpperCamelCase)
_lowerCamelCase : int = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Dict = self.compute_metrics
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : List[Any] = eval_loop(
_UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
finally:
_lowerCamelCase : List[Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_lowerCamelCase : Dict = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions)
_lowerCamelCase : int = self.compute_metrics(_UpperCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
_lowerCamelCase : List[str] = metrics.pop(_UpperCamelCase)
self.log(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_lowerCamelCase : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCamelCase)
return metrics
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str = "test") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.get_test_dataloader(_UpperCamelCase)
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Optional[int] = self.compute_metrics
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : Union[str, Any] = eval_loop(
_UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
finally:
_lowerCamelCase : Tuple = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowerCamelCase : Optional[Any] = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions , """predict""")
_lowerCamelCase : Union[str, Any] = self.compute_metrics(_UpperCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
_lowerCamelCase : List[Any] = metrics.pop(_UpperCamelCase)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[str]="./") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.eval_dataset
_lowerCamelCase : List[Any] = self.get_eval_dataloader(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = next(iter(_UpperCamelCase))
# saving device - to make it consistent
_lowerCamelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
# convert to tuple
_lowerCamelCase : Optional[Any] = tuple(v.to(_UpperCamelCase) for k, v in batch.items())
logger.info("""Converting model to be onnx compatible""")
from pytorch_quantization.nn import TensorQuantizer
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Union[str, Any] = self.model.to(_UpperCamelCase)
model.eval()
model.float()
_lowerCamelCase : Any = model.module if hasattr(_UpperCamelCase , """module""") else model
quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args)
_lowerCamelCase : Tuple = os.path.join(_UpperCamelCase , """model.onnx""")
logger.info(F"""exporting model to {output_model_file}""")
_lowerCamelCase : List[str] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , export_params=_UpperCamelCase , opset_version=13 , do_constant_folding=_UpperCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_UpperCamelCase , )
logger.info("""onnx export finished""")
| 15 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the imported script sees the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
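# Example invocation (hypothetical script name and flags): the launcher imports the
# target script as a module, patches sys.argv, then forks one process per TPU core:
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased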
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
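# Note: only `_import_structure` (a dict of submodule -> exported names) is
# evaluated at import time; `_LazyModule` resolves the heavy torch/TF submodules
# on first attribute access. A sketch of the deferred import, using the class
# names declared above:
#
#   from transformers import EfficientFormerConfig   # cheap: config module only
#   from transformers import EfficientFormerModel    # triggers the torch import lazily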
| 243 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
        'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MaskFormerForInstanceSegmentation',
        'MaskFormerModel',
        'MaskFormerPreTrainedModel',
    ]
    _import_structure['modeling_maskformer_swin'] = [
        'MaskFormerSwinBackbone',
        'MaskFormerSwinModel',
        'MaskFormerSwinPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 365 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
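# Minimal usage sketch (illustrative; assumes a local SentencePiece model file,
# which is not shipped with this snippet):
#
#   tokenizer = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   tokens = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(t) for t in tokens]
#   # Every SentencePiece id comes back shifted by tokenizer.fairseq_offset (1),
#   # so ids 0-3 stay reserved for <s>/<pad>/</s>/<unk> as in fairseq.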
| 712 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Creates the cache directory for modules with an init, and adds it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Creates a dynamic module in the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the cache directory for modules and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the (single) pipeline class defined in the module that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass  # use the "main" branch as-is
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
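# Usage sketch (hedged): this helper is what `DiffusionPipeline.from_pretrained(...,
# custom_pipeline=...)` relies on under the hood. The repository id below is
# illustrative, not a guaranteed artifact:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "hf-internal-testing/diffusers-dummy-pipeline",  # hypothetical repo id
#       "pipeline.py",
#   )
#   pipe = pipeline_cls(...)  # instantiate with whatever components the class expects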
| 310 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    # Pad every sentence in the batch up to the longest one and build the matching mask.
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
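# Usage sketch (illustrative paths and batch size; `tokenizer` is assumed to be any
# HF tokenizer exposing `.encode`): wiring the pieces above into a DataLoader.
#
#   from torch.utils.data import DataLoader
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)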
| 567 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
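# Usage sketch: instantiating the config standalone (treat the keyword choices as
# illustrative; any PretrainedConfig round-trips through JSON):
#
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#   config.save_pretrained("./decision_transformer_config")  # hypothetical local path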
| 567 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        # Pad the message so its length is congruent to 56 mod 64, then append
        # the original bit length as a big-endian 64-bit integer.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
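# Quick sanity check (SHA-1 of b"abc" is the well-known test vector):
#
#   >>> SHAaHash(b"abc").final_hash()
#   'a9993e364706816aba3e25717850c26c9cd0d89d'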
| 704 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCamelCase_ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
def __a ( self :Dict , _lowercase :Optional[Any] , **_lowercase :List[Any]) -> str:
return self.model(_lowercase , **_lowercase)
def __a ( self :Tuple , _lowercase :List[int]) -> str:
UpperCAmelCase_ = self.tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase)
return lmap(str.strip , _lowercase)
def __a ( self :List[str] , _lowercase :dict) -> Tuple:
UpperCAmelCase_ = self.tokenizer.pad_token_id
UpperCAmelCase_ , UpperCAmelCase_ = batch['''input_ids'''], batch['''attention_mask''']
UpperCAmelCase_ = batch['''labels''']
if isinstance(self.model , _lowercase):
UpperCAmelCase_ = self.model._shift_right(_lowercase)
else:
UpperCAmelCase_ = shift_tokens_right(_lowercase , _lowercase)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase_ = decoder_input_ids
self.save_readable_batch(_lowercase)
UpperCAmelCase_ = self(_lowercase , attention_mask=_lowercase , decoder_input_ids=_lowercase , use_cache=_lowercase)
UpperCAmelCase_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase_ = nn.CrossEntropyLoss(ignore_index=_lowercase)
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
else:
UpperCAmelCase_ = nn.functional.log_softmax(_lowercase , dim=-1)
UpperCAmelCase_ , UpperCAmelCase_ = label_smoothed_nll_loss(
_lowercase , _lowercase , self.hparams.label_smoothing , ignore_index=_lowercase)
return (loss,)
@property
def __a ( self :List[Any]) -> int:
return self.tokenizer.pad_token_id
def __a ( self :Any , _lowercase :Tuple , _lowercase :Optional[int]) -> Dict:
UpperCAmelCase_ = self._step(_lowercase)
UpperCAmelCase_ = dict(zip(self.loss_names , _lowercase))
# tokens per batch
UpperCAmelCase_ = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].shape[0]
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __a ( self :Union[str, Any] , _lowercase :int , _lowercase :List[Any]) -> Dict:
return self._generative_step(_lowercase)
def __a ( self :int , _lowercase :List[str] , _lowercase :List[Any]="val") -> Dict:
self.step_count += 1
UpperCAmelCase_ = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
UpperCAmelCase_ = losses['''loss''']
UpperCAmelCase_ = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
UpperCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ = torch.tensor(_lowercase).type_as(_lowercase)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(_lowercase)
UpperCAmelCase_ = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
UpperCAmelCase_ = self.step_count
self.metrics[prefix].append(_lowercase) # callback writes this to self.metrics_save_path
UpperCAmelCase_ = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
def __a ( self :Optional[Any] , _lowercase :dict) -> dict:
UpperCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
UpperCAmelCase_ = self.ids_to_clean_text(_lowercase)
UpperCAmelCase_ = self.ids_to_clean_text(batch['''labels'''])
UpperCAmelCase_ = self._step(_lowercase)
UpperCAmelCase_ = dict(zip(self.loss_names , _lowercase))
UpperCAmelCase_ = self.calc_generative_metrics(_lowercase , _lowercase)
UpperCAmelCase_ = np.mean(lmap(_lowercase , _lowercase))
base_metrics.update(gen_time=_lowercase , gen_len=_lowercase , preds=_lowercase , target=_lowercase , **_lowercase)
return base_metrics
def __a ( self :Optional[Any] , _lowercase :int , _lowercase :Optional[Any]) -> Optional[int]:
return self._generative_step(_lowercase)
def __a ( self :str , _lowercase :List[Any]) -> List[Any]:
return self.validation_epoch_end(_lowercase , prefix='''test''')
def __a ( self :Union[str, Any] , _lowercase :Optional[int]) -> SeqaSeqDataset:
UpperCAmelCase_ = self.n_obs[type_path]
UpperCAmelCase_ = self.target_lens[type_path]
UpperCAmelCase_ = self.dataset_class(
self.tokenizer , type_path=_lowercase , n_obs=_lowercase , max_target_length=_lowercase , **self.dataset_kwargs , )
return dataset
def __a ( self :str , _lowercase :str , _lowercase :int , _lowercase :bool = False) -> DataLoader:
UpperCAmelCase_ = self.get_dataset(_lowercase)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_sortish_sampler(_lowercase , distributed=self.hparams.gpus > 1)
return DataLoader(
_lowercase , batch_size=_lowercase , collate_fn=dataset.collate_fn , shuffle=_lowercase , num_workers=self.num_workers , sampler=_lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
return DataLoader(
_lowercase , batch_sampler=_lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowercase , batch_size=_lowercase , collate_fn=dataset.collate_fn , shuffle=_lowercase , num_workers=self.num_workers , sampler=_lowercase , )
def __a ( self :int) -> DataLoader:
UpperCAmelCase_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_lowercase)
return dataloader
def __a ( self :int) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size)
def __a ( self :List[str]) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size)
@staticmethod
def __a ( _lowercase :List[Any] , _lowercase :str) -> List[Any]:
BaseTransformer.add_model_specific_args(_lowercase , _lowercase)
add_generic_args(_lowercase , _lowercase)
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''')
parser.add_argument('''--freeze_embeds''' , action='''store_true''')
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_lowercase)
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_lowercase)
parser.add_argument('''--max_tokens_per_batch''' , type=_lowercase , default=_lowercase)
parser.add_argument('''--logger_name''' , type=_lowercase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''')
parser.add_argument('''--n_train''' , type=_lowercase , default=-1 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' , type=_lowercase , default=500 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' , type=_lowercase , default=-1 , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' , type=_lowercase , default='''summarization''' , required=_lowercase , help='''# examples. -1 means use all.''')
parser.add_argument('''--label_smoothing''' , type=_lowercase , default=0.0 , required=_lowercase)
parser.add_argument('''--src_lang''' , type=_lowercase , default='''''' , required=_lowercase)
parser.add_argument('''--tgt_lang''' , type=_lowercase , default='''''' , required=_lowercase)
parser.add_argument('''--eval_beams''' , type=_lowercase , default=_lowercase , required=_lowercase)
parser.add_argument(
'''--val_metric''' , type=_lowercase , default=_lowercase , required=_lowercase , choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' , type=_lowercase , default=_lowercase , help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' , type=_lowercase , default=1 , required=_lowercase , help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' , type=_lowercase , default=-1 , required=_lowercase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = pl.Trainer.add_argparse_args(parser)
UpperCamelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCamelCase_ = parser.parse_args()
main(args)
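# Example invocation (hypothetical paths/arguments; any pl.Trainer flag is also
# accepted since the parser is extended with Trainer.add_argparse_args above):
#
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#       --model_name_or_path t5-small --learning_rate 3e-5 \
#       --train_batch_size 8 --eval_batch_size 8 --do_train --do_predict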
| 561 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def snake_case__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = pipe(**lowerCAmelCase__ )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs.pop('''prompt''' )]
_UpperCamelCase = pipe.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''np''' , )
_UpperCamelCase = text_inputs['''input_ids''']
_UpperCamelCase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_UpperCamelCase = prompt_embeds
# forward
_UpperCamelCase = pipe(**lowerCAmelCase__ )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = negative_prompt
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = pipe(**lowerCAmelCase__ )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs.pop('''prompt''' )]
_UpperCamelCase = []
for p in [prompt, negative_prompt]:
_UpperCamelCase = pipe.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''np''' , )
_UpperCamelCase = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_UpperCamelCase , _UpperCamelCase = embeds
# forward
_UpperCamelCase = pipe(**lowerCAmelCase__ )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case__ ( self : Any ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def snake_case__ ( self : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : str ) -> Dict:
'''simple docstring'''
_UpperCamelCase = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = '''open neural network exchange'''
_UpperCamelCase = np.random.RandomState(0 )
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type='''np''' )
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = '''open neural network exchange'''
_UpperCamelCase = np.random.RandomState(0 )
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type='''np''' )
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
        number_of_steps = 0
        def test_callback_fn(step: int , timestep: int , latents: np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = '''Andromeda galaxy in a bottle'''
_UpperCamelCase = np.random.RandomState(0 )
pipe(
prompt=lowerCAmelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert pipe.safety_checker is None
_UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
| 98 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = AltDiffusionPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase_ = 77
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(UpperCamelCase__ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = """A photo of an astronaut"""
lowercase_ = alt_pipe(**UpperCamelCase__ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(UpperCamelCase__ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = alt_pipe(**UpperCamelCase__ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
lowercase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""numpy""" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 412 | 0 |
def equated_monthly_installments(principal, rate_per_annum, years_to_repay ):
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay, int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
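    # Worked example (the figures are illustrative, not from the original
    # module): a 25_000 principal at an 8% annual rate over 10 years comes
    # to roughly 303.32 per month.
    print(F"{equated_monthly_installments(25_000, 0.08, 10):.2f}")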
| 706 |
def nand_gate(input_1: int, input_2: int ) -> int:
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    assert nand_gate(0, 0 ) == 1
    assert nand_gate(0, 1 ) == 1
    assert nand_gate(1, 0 ) == 1
    assert nand_gate(1, 1 ) == 0
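# NAND is functionally complete, so the other basic gates can be derived from
# it alone. A brief sketch (these helpers are an addition for illustration,
# not part of the original module):
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)
def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))
def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))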
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 453 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners ,token ) -> None:
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd ,shell=True ,stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" ,"w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    def list_str(values ) -> list:
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 397 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a : int = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args ) -> None:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student ,args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student ,args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
lowercase__ : Optional[int] = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" ,action="store_true" ,help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" ,type=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ ,help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" ,type=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ ,help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." ,)
parser.add_argument(
"--student_type" ,type=SCREAMING_SNAKE_CASE_ ,choices=["distilbert", "roberta", "gpt2"] ,required=SCREAMING_SNAKE_CASE_ ,help="The student type (DistilBERT, RoBERTa)." ,)
parser.add_argument("--student_config" ,type=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ ,help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" ,default=SCREAMING_SNAKE_CASE_ ,type=SCREAMING_SNAKE_CASE_ ,help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" ,choices=["bert", "roberta", "gpt2"] ,required=SCREAMING_SNAKE_CASE_ ,help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" ,type=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ ,help="The teacher model." )
parser.add_argument("--temperature" ,default=2.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" ,default=0.5 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" ,default=0.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." ,)
parser.add_argument("--alpha_clm" ,default=0.5 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" ,default=0.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" ,default=0.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" ,action="store_true" ,help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" ,default=0.15 ,type=SCREAMING_SNAKE_CASE_ ,help="Proportion of tokens for which we need to make a prediction." ,)
parser.add_argument("--word_mask" ,default=0.8 ,type=SCREAMING_SNAKE_CASE_ ,help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" ,default=0.1 ,type=SCREAMING_SNAKE_CASE_ ,help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" ,default=0.1 ,type=SCREAMING_SNAKE_CASE_ ,help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" ,default=0.7 ,type=SCREAMING_SNAKE_CASE_ ,help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." ,)
parser.add_argument("--token_counts" ,type=SCREAMING_SNAKE_CASE_ ,help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" ,action="store_true" ,help="If true, compute the distillation loss only the [MLM] prediction distribution." ,)
parser.add_argument(
"--freeze_pos_embs" ,action="store_true" ,help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." ,)
parser.add_argument(
"--freeze_token_type_embds" ,action="store_true" ,help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." ,)
parser.add_argument("--n_epoch" ,type=SCREAMING_SNAKE_CASE_ ,default=3 ,help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" ,type=SCREAMING_SNAKE_CASE_ ,default=5 ,help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" ,action="store_false" ,help="If true, group sequences that have similar length into the same batch. Default is true." ,)
parser.add_argument(
"--gradient_accumulation_steps" ,type=SCREAMING_SNAKE_CASE_ ,default=50 ,help="Gradient accumulation for larger training batches." ,)
parser.add_argument("--warmup_prop" ,default=0.05 ,type=SCREAMING_SNAKE_CASE_ ,help="Linear warmup proportion." )
parser.add_argument("--weight_decay" ,default=0.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" ,default=5E-4 ,type=SCREAMING_SNAKE_CASE_ ,help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" ,default=1E-6 ,type=SCREAMING_SNAKE_CASE_ ,help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" ,default=5.0 ,type=SCREAMING_SNAKE_CASE_ ,help="Max gradient norm." )
parser.add_argument("--initializer_range" ,default=0.02 ,type=SCREAMING_SNAKE_CASE_ ,help="Random initialization range." )
parser.add_argument(
"--fp16" ,action="store_true" ,help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" ,)
parser.add_argument(
"--fp16_opt_level" ,type=SCREAMING_SNAKE_CASE_ ,default="O1" ,help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) ,)
parser.add_argument("--n_gpu" ,type=SCREAMING_SNAKE_CASE_ ,default=1 ,help="Number of GPUs in the node." )
parser.add_argument("--local_rank" ,type=SCREAMING_SNAKE_CASE_ ,default=-1 ,help="Distributed training - Local rank" )
parser.add_argument("--seed" ,type=SCREAMING_SNAKE_CASE_ ,default=56 ,help="Random seed" )
parser.add_argument("--log_interval" ,type=SCREAMING_SNAKE_CASE_ ,default=5_00 ,help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" ,type=SCREAMING_SNAKE_CASE_ ,default=40_00 ,help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path ,"parameters.json" ) ,"w" ) as f:
json.dump(vars(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, student_tokenizer_class = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file ,"rb" ) as fp:
        data = pickle.load(fp )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts ,"rb" ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts ,1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0 # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args ,data=data )
logger.info("Data loader created." )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights ,config=stu_architecture_config )
else:
        student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student ,args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student ,args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args ,dataset=train_lm_seq_dataset ,token_probs=token_probs ,student=student ,teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main() | 397 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 720 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig ):
    model_type = '''poolformer'''
    def __init__( self , num_channels=3 , patch_size=1_6 , stride=1_6 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 2E-3
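# Usage sketch (illustrative; the keyword arguments simply restate the
# defaults defined above):
#   config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#   assert config.model_type == "poolformer"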
| 427 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class A_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
_UpperCAmelCase : Optional[Any] = Vector()
def snake_case__ ( self) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : int = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(_UpperCAmelCase) , '''(0,0,0,0,0,1)''')
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = Vector([1, 2, 3, 4])
self.assertEqual(len(_UpperCAmelCase) , 4)
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = Vector([1, 2])
_UpperCAmelCase : Optional[Any] = Vector([1, 2, 3, 4, 5])
_UpperCAmelCase : int = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_UpperCAmelCase : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Any = Vector([1, 2, 3])
_UpperCAmelCase : Optional[int] = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = Vector([1, 2, 3])
_UpperCAmelCase : List[str] = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = Vector([1, 2, 3])
_UpperCAmelCase : Tuple = Vector([2, -1, 4]) # for test of dot product
_UpperCAmelCase : Dict = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , '''(3.0,6.0,9.0)''')
self.assertEqual((a * b) , 0)
def snake_case__ ( self) -> List[str]:
"""simple docstring"""
self.assertEqual(str(zero_vector(10)).count('''0''') , 10)
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , '''(0,1,0)''')
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = Vector([1, 2, 3])
_UpperCAmelCase : int = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase)) , '''(3,4,7)''')
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = Vector([1, 0, 0, 0, 0, 0])
_UpperCAmelCase : Any = x.copy()
self.assertEqual(str(_UpperCAmelCase) , str(_UpperCAmelCase))
def snake_case__ ( self) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(_UpperCAmelCase) , '''(0,1,0)''')
def snake_case__ ( self) -> int:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(_UpperCAmelCase))
def snake_case__ ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
_UpperCAmelCase : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase))
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
_UpperCAmelCase : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase))
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def snake_case__ ( self) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
_UpperCAmelCase : Optional[int] = Vector([1, 2, 3])
self.assertEqual('''(14,32,50)''' , str(a * x))
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2))
def snake_case__ ( self) -> int:
"""simple docstring"""
_UpperCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(_UpperCAmelCase))
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def snake_case__ ( self) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
_UpperCAmelCase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b))
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
_UpperCAmelCase : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b))
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 485 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_falcon"""] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 0 |
'''simple docstring'''
def pancake_sort(arr: list ) -> list:
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
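# Complexity note: each pass finds the maximum of the unsorted prefix and
# performs at most two flips, so the sort costs O(n^2) comparisons and O(n)
# flips overall.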
if __name__ == "__main__":
_lowercase = input("""Enter numbers separated by a comma:\n""").strip()
_lowercase = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 162 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
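# Usage note: attributes resolve lazily on first access, so e.g.
# `from transformers.onnx import OnnxConfig, export` only imports the
# underlying submodules when those names are actually used.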
| 162 | 1 |