code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
"""A doubly linked list with head/tail pointers and an iterator."""


class Node:
    def __init__(self, data, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        """Detach a node from its neighbours and clear its own pointers."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None
def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> for i in (1, 2, 3):
    ...     new_list.insert(i)
    >>> new_list.insert_at_position(2, 99)
    >>> str(new_list)
    '1 99 2 3'
    >>> new_list.delete_value(99)
    >>> 2 in new_list
    True
    >>> new_list.get_tail_data()
    3
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 3 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only imported on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ) -> str:
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(lowercase__ , lowercase__ ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowerCamelCase__ : str =""""""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowercase__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[Any], lowerCamelCase : Dict="", lowerCamelCase : Tuple="train" )-> Dict:
assert os.path.isdir(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : Dict =os.listdir(lowerCamelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCamelCase__ : Optional[int] =os.path.join(lowerCamelCase, lowerCamelCase )
if not os.path.isfile(lowerCamelCase ):
continue
self.documents.append(lowerCamelCase )
def __len__( self : Optional[Any] )-> List[str]:
return len(self.documents )
def __getitem__( self : List[str], lowerCamelCase : Dict )-> str:
lowerCamelCase__ : int =self.documents[idx]
lowerCamelCase__ : List[Any] =document_path.split('''/''' )[-1]
with open(lowerCamelCase, encoding='''utf-8''' ) as source:
lowerCamelCase__ : Optional[int] =source.read()
lowerCamelCase__ , lowerCamelCase__ : List[Any] =process_story(lowerCamelCase )
return document_name, story_lines, summary_lines
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[str] =list(filter(lambda __lowerCamelCase : len(__lowerCamelCase ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCamelCase__ : Dict =[_add_missing_period(__lowerCamelCase ) for line in nonempty_lines]
# gather article lines
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : Optional[Any] =deque(__lowerCamelCase )
while True:
try:
lowerCamelCase__ : Tuple =lines.popleft()
if element.startswith('''@highlight''' ):
break
story_lines.append(__lowerCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCamelCase__ : Dict =list(filter(lambda __lowerCamelCase : not t.startswith('''@highlight''' ) , __lowerCamelCase ) )
return story_lines, summary_lines
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : Any =['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if len(__lowerCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__lowerCamelCase )) )
return sequence
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : int =torch.ones_like(__lowerCamelCase )
lowerCamelCase__ : Any =sequence == pad_token_id
lowerCamelCase__ : List[str] =0
return mask
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =[tokenizer.encode(__lowerCamelCase ) for line in story_lines]
lowerCamelCase__ : List[Any] =[token for sentence in story_lines_token_ids for token in sentence]
lowerCamelCase__ : List[Any] =[tokenizer.encode(__lowerCamelCase ) for line in summary_lines]
lowerCamelCase__ : Optional[int] =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Any =[]
for sequence in batch:
lowerCamelCase__ : Optional[int] =-1
lowerCamelCase__ : List[str] =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__lowerCamelCase )
return torch.tensor(__lowerCamelCase )
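# Editorial sketch (not part of the original file): process_story() on a tiny
# in-memory story. Everything before the first "@highlight" is the article, the
# rest is the summary, and _add_missing_period() appends missing trailing periods.
if __name__ == "__main__":
    demo = "First sentence\nSecond sentence.\n@highlight\nThe summary"
    print(process_story(demo))
    # -> (['First sentence.', 'Second sentence.'], ['The summary.'])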
| 272 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time, in seconds, that each candidate program may run (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
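# Editorial sketch (not part of the original file): the closed form implemented
# by `estimator` above is
#   pass@k = 1 - C(n - c, k) / C(n, k)
# for n generated samples of which c pass. Sanity check against the docstring
# example (n = 2 candidates, c = 1 correct): pass@1 = 1 - 1/2 = 0.5.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([2]), np.array([1]), 1))  # [0.5]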
| 149 |
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder that consumes an encoded prefix (e.g. a CLIP embedding)."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)

        return output_texts, seq_lengths
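# Editorial sketch (not part of the original file; shapes are assumptions from
# the defaults above): a tiny randomly initialised decoder and one forward pass.
# With prefix_inner_dim == n_embd, the prefix projections collapse to Identity.
if __name__ == "__main__":
    decoder = UniDiffuserTextDecoder(prefix_length=4, prefix_inner_dim=768)
    ids = torch.randint(0, 50257, (2, 8))     # batch of 2 text sequences
    prefix = torch.randn(2, 4, 768)           # batch of 2 four-token prefixes
    out = decoder(ids, prefix)                # logits over [prefix + text] positions
    print(out.logits.shape)                   # torch.Size([2, 12, 50257])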
| 149 | 1 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def a_ ( _lowercase , _lowercase , _lowercase=[] ):
_UpperCamelCase : List[str] = size[0] - overlap_pixels * 2
_UpperCamelCase : Optional[int] = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
_UpperCamelCase : int = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
_UpperCamelCase : Union[str, Any] = np.pad(_lowercase , mode='''linear_ramp''' , pad_width=_lowercase , end_values=0 )
if "l" in remove_borders:
_UpperCamelCase : Tuple = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
_UpperCamelCase : List[str] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
_UpperCamelCase : Union[str, Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
_UpperCamelCase : Dict = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def a_ ( _lowercase , _lowercase , _lowercase ):
return max(_lowercase , min(_lowercase , _lowercase ) )
def a_ ( _lowercase , _lowercase , _lowercase ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def a_ ( _lowercase , _lowercase , _lowercase ):
_UpperCamelCase : List[Any] = list(_lowercase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
_UpperCamelCase : Union[str, Any] = clamp_rect(_lowercase , [0, 0] , [image_size[0], image_size[1]] )
return rect
def a_ ( _lowercase , _lowercase , _lowercase , _lowercase ):
_UpperCamelCase : Any = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(_lowercase , (original_slice, 0) )
return result
def a_ ( _lowercase , _lowercase ):
_UpperCamelCase : List[Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
_UpperCamelCase : Any = tile.crop(_lowercase )
return tile
def a_ ( _lowercase , _lowercase ):
_UpperCamelCase : List[str] = n % d
return n - divisor
class _a ( _lowerCAmelCase ):
def __init__( self : Tuple, lowerCAmelCase__ : AutoencoderKL, lowerCAmelCase__ : CLIPTextModel, lowerCAmelCase__ : CLIPTokenizer, lowerCAmelCase__ : UNetaDConditionModel, lowerCAmelCase__ : DDPMScheduler, lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCAmelCase__ : int = 3_5_0, ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, tokenizer=lowerCAmelCase__, unet=lowerCAmelCase__, low_res_scheduler=lowerCAmelCase__, scheduler=lowerCAmelCase__, max_noise_level=lowerCAmelCase__, )
def snake_case ( self : Optional[Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Dict, lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : Union[str, Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : List[str], lowerCAmelCase__ : Optional[int], **lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = (
min(image.size[0] - (tile_size + original_image_slice), x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice), y * tile_size ),
min(image.size[0], (x + 1) * tile_size ),
min(image.size[1], (y + 1) * tile_size ),
)
_UpperCamelCase : Optional[Any] = add_overlap_rect(lowerCAmelCase__, lowerCAmelCase__, image.size )
_UpperCamelCase : Optional[int] = image.crop(lowerCAmelCase__ )
_UpperCamelCase : List[str] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_UpperCamelCase : Union[str, Any] = translated_slice_x - (original_image_slice / 2)
_UpperCamelCase : Optional[Any] = max(0, lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = squeeze_tile(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
_UpperCamelCase : int = to_input.size
_UpperCamelCase : int = to_input.resize((tile_size, tile_size), Image.BICUBIC )
_UpperCamelCase : int = super(lowerCAmelCase__, self ).__call__(image=lowerCAmelCase__, **lowerCAmelCase__ ).images[0]
_UpperCamelCase : int = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC )
_UpperCamelCase : str = unsqueeze_tile(lowerCAmelCase__, lowerCAmelCase__ )
_UpperCamelCase : int = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC )
_UpperCamelCase : str = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
_UpperCamelCase : Tuple = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=lowerCAmelCase__ ), mode='''L''', )
final_image.paste(
lowerCAmelCase__, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), lowerCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any], lowerCAmelCase__ : Union[str, List[str]], lowerCAmelCase__ : Union[PIL.Image.Image, List[PIL.Image.Image]], lowerCAmelCase__ : int = 7_5, lowerCAmelCase__ : float = 9.0, lowerCAmelCase__ : int = 5_0, lowerCAmelCase__ : Optional[Union[str, List[str]]] = None, lowerCAmelCase__ : Optional[int] = 1, lowerCAmelCase__ : float = 0.0, lowerCAmelCase__ : Optional[torch.Generator] = None, lowerCAmelCase__ : Optional[torch.FloatTensor] = None, lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCAmelCase__ : int = 1, lowerCAmelCase__ : int = 1_2_8, lowerCAmelCase__ : int = 3_2, lowerCAmelCase__ : int = 3_2, ) -> int:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.new('''RGB''', (image.size[0] * 4, image.size[1] * 4) )
_UpperCamelCase : int = math.ceil(image.size[0] / tile_size )
_UpperCamelCase : Optional[Any] = math.ceil(image.size[1] / tile_size )
_UpperCamelCase : Union[str, Any] = tcx * tcy
_UpperCamelCase : Union[str, Any] = 0
for y in range(lowerCAmelCase__ ):
for x in range(lowerCAmelCase__ ):
self._process_tile(
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, prompt=lowerCAmelCase__, num_inference_steps=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, noise_level=lowerCAmelCase__, negative_prompt=lowerCAmelCase__, num_images_per_prompt=lowerCAmelCase__, eta=lowerCAmelCase__, generator=lowerCAmelCase__, latents=lowerCAmelCase__, )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def a_ ( ):
# Run a demo
_UpperCamelCase : str = '''stabilityai/stable-diffusion-x4-upscaler'''
_UpperCamelCase : Optional[Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(_lowercase , revision='''fp16''' , torch_dtype=torch.floataa )
_UpperCamelCase : Optional[int] = pipe.to('''cuda''' )
_UpperCamelCase : Optional[Any] = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(_lowercase ):
print(F"""progress: {obj['progress']:.4f}""" )
obj["image"].save('''diffusers_library_progress.jpg''' )
_UpperCamelCase : Tuple = pipe(image=_lowercase , prompt='''Black font, white background, vector''' , noise_level=40 , callback=_lowercase )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 128 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _a :
def __init__( self : Dict, lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Optional[int]=1_3, lowerCAmelCase__ : Optional[Any]=7, lowerCAmelCase__ : Optional[Any]=True, lowerCAmelCase__ : Any=True, lowerCAmelCase__ : str=True, lowerCAmelCase__ : Any=9_9, lowerCAmelCase__ : Dict=3_2, lowerCAmelCase__ : List[Any]=5, lowerCAmelCase__ : Tuple=4, lowerCAmelCase__ : List[Any]=3_7, lowerCAmelCase__ : Tuple="gelu", lowerCAmelCase__ : Any=0.1, lowerCAmelCase__ : Optional[Any]=0.1, lowerCAmelCase__ : Dict=5_1_2, lowerCAmelCase__ : List[str]=1_6, lowerCAmelCase__ : Tuple=2, lowerCAmelCase__ : int=0.02, lowerCAmelCase__ : int=3, lowerCAmelCase__ : Optional[Any]=4, lowerCAmelCase__ : Dict=None, ) -> int:
'''simple docstring'''
_UpperCamelCase : Tuple = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : Tuple = use_token_type_ids
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : int = type_vocab_size
_UpperCamelCase : List[str] = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : int = num_labels
_UpperCamelCase : List[str] = num_choices
_UpperCamelCase : str = scope
_UpperCamelCase : Optional[int] = self.vocab_size - 1
def snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_UpperCamelCase : List[str] = None
if self.use_token_type_ids:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : str = None
_UpperCamelCase : List[str] = None
if self.use_labels:
_UpperCamelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_UpperCamelCase : Dict = ids_tensor([self.batch_size], self.num_choices )
_UpperCamelCase : str = OpenAIGPTConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
_UpperCamelCase : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self : Union[str, Any], lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : List[str], lowerCAmelCase__ : List[str], lowerCAmelCase__ : List[str], *lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Dict = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : List[str] = model(lowerCAmelCase__, token_type_ids=lowerCAmelCase__, head_mask=lowerCAmelCase__ )
_UpperCamelCase : Any = model(lowerCAmelCase__, token_type_ids=lowerCAmelCase__ )
_UpperCamelCase : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Any, lowerCAmelCase__ : Tuple, lowerCAmelCase__ : Union[str, Any], lowerCAmelCase__ : Any, lowerCAmelCase__ : Optional[Any], *lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Tuple = model(lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Optional[int], lowerCAmelCase__ : str, lowerCAmelCase__ : List[str], lowerCAmelCase__ : Any, lowerCAmelCase__ : List[Any], *lowerCAmelCase__ : Any ) -> int:
'''simple docstring'''
_UpperCamelCase : Tuple = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Optional[int] = model(lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : List[str], lowerCAmelCase__ : Dict, lowerCAmelCase__ : Dict, lowerCAmelCase__ : List[str], lowerCAmelCase__ : Optional[Any], *lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.num_labels
_UpperCamelCase : Optional[int] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_UpperCamelCase : Union[str, Any] = model(lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Any = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : Tuple = config_and_inputs
_UpperCamelCase : Tuple = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self : Union[str, Any], lowerCAmelCase__ : Any, lowerCAmelCase__ : List[str], lowerCAmelCase__ : str, lowerCAmelCase__ : List[str], lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def snake_case ( self : str, lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : List[str], lowerCAmelCase__ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = super()._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__, return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCamelCase : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=lowerCAmelCase__, )
_UpperCamelCase : Tuple = inputs_dict['''labels''']
_UpperCamelCase : List[str] = inputs_dict['''labels''']
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=lowerCAmelCase__, )
_UpperCamelCase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase__ )
return inputs_dict
def snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = OpenAIGPTModelTester(self )
_UpperCamelCase : int = ConfigTester(self, config_class=lowerCAmelCase__, n_embd=3_7 )
def snake_case ( self : Optional[int] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__ )
def snake_case ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
def snake_case ( self : int ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__ )
def snake_case ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__ )
@slow
def snake_case ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : int = OpenAIGPTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class _a ( unittest.TestCase ):
@slow
def snake_case ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : int = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowerCAmelCase__ )
_UpperCamelCase : str = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]], dtype=torch.long, device=lowerCAmelCase__ ) # the president is
_UpperCamelCase : Optional[int] = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCamelCase : Union[str, Any] = model.generate(lowerCAmelCase__, do_sample=lowerCAmelCase__ )
self.assertListEqual(output_ids[0].tolist(), lowerCAmelCase__ )
| 128 | 1 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """
    Returns a list of all primes up to and including num,
    using the sieve of Eratosthenes.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 304 |
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    """Returns 1 if every character of the word is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect the multi-character, all-Chinese words from a token list."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix non-initial characters of whole words with ## so they can be masked together."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
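# Editorial sketch (not part of the original file): given LTP's whole word
# "天气" and BERT's per-character tokens, add_sub_symbol() marks the non-initial
# characters so a whole-word-masking collator can mask the full word at once:
#
#     add_sub_symbol(["天", "气", "好"], {"天气"})  ->  ["天", "##气", "好"]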
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Compute whole-word-masking reference positions for each input line."""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 85 | 0 |
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 82 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
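# Editorial note (not part of the original file): for a sentence pair,
# create_token_type_ids_from_sequences() emits 0s over [CLS] + sequence A + [SEP]
# and 1s over sequence B + [SEP]. For example, with token_ids_0 = [7, 8] and
# token_ids_1 = [9]:
#
#     segment ids -> [0, 0, 0, 0, 1, 1]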
| 82 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints applied during constrained generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Runs a sanity check that the constraint is properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Forces a fixed, ordered sequence of token ids to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """Builds a trie over the word sequences in `nested_token_ids`."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """Returns the tokens that can extend `current_seq` inside the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # If the number of leaves differs from the number of words, some word
        # is a prefix (subset) of another.
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Fulfilled when any one of several candidate token sequences is generated."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks the fulfilment state of a list of constraints during beam search."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of
                #     constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        # We never mutate self.constraints throughout this process, so the new
        # state starts from the constraints' initialization state.
        new_state = ConstraintListState(self.constraints)
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
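
# Usage sketch (added for illustration; not in the original module): stepping a
# PhrasalConstraint token by token, exactly as `ConstraintListState.add` does.
if __name__ == "__main__":
    demo_constraint = PhrasalConstraint(token_ids=[5, 7, 9])
    for demo_token in [5, 7, 9]:
        demo_stepped, demo_completed, demo_reset = demo_constraint.update(demo_token)
    # After feeding the full phrase, the constraint reports completion.
    assert demo_completed and demo_constraint.remaining() == 0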
"""simple docstring"""
from typing import Any
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list:
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCamelCase = {}
lowerCamelCase = {}
for state in states_space:
lowerCamelCase = observations_space[0]
lowerCamelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__ ) ):
lowerCamelCase = observations_space[o]
lowerCamelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase = """"""
lowerCamelCase = -1
for k_state in states_space:
lowerCamelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase = probability
lowerCamelCase = k_state
# Update probabilities and pointers dicts
lowerCamelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase = arg_max
# The final observation
lowerCamelCase = observations_space[len(snake_case__ ) - 1]
# argmax for given final observation
lowerCamelCase = """"""
lowerCamelCase = -1
for k_state in states_space:
lowerCamelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase = probability
lowerCamelCase = k_state
lowerCamelCase = arg_max
# Process pointers backwards
lowerCamelCase = last_state
lowerCamelCase = []
for o in range(len(snake_case__ ) - 1 , -1 , -1 ):
result.append(snake_case__ )
lowerCamelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None:
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__ )
_validate_dicts(
snake_case__ , snake_case__ , snake_case__ )
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def a__ ( snake_case__ , snake_case__ ) -> None:
_validate_list(snake_case__ , """observations_space""" )
_validate_list(snake_case__ , """states_space""" )
def a__ ( snake_case__ , snake_case__ ) -> None:
if not isinstance(_object , snake_case__ ):
lowerCamelCase = F'{var_name} must be a list'
raise ValueError(snake_case__ )
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = F'{var_name} must be a list of strings'
raise ValueError(snake_case__ )
def a__ ( snake_case__ , snake_case__ , snake_case__ , ) -> None:
_validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ )
_validate_nested_dict(snake_case__ , """transition_probabilities""" )
_validate_nested_dict(snake_case__ , """emission_probabilities""" )
def a__ ( snake_case__ , snake_case__ ) -> None:
_validate_dict(_object , snake_case__ , snake_case__ )
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> None:
if not isinstance(_object , snake_case__ ):
lowerCamelCase = F'{var_name} must be a dict'
raise ValueError(snake_case__ )
if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ):
lowerCamelCase = F'{var_name} all keys must be strings'
raise ValueError(snake_case__ )
if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ):
lowerCamelCase = """nested dictionary """ if nested else """"""
lowerCamelCase = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(snake_case__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
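
# Worked example (added for illustration): the classic healthy/fever HMM. The
# probabilities below are demo values; any row-stochastic dicts keyed by the
# state/observation names will work.
def _demo_viterbi() -> None:
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    initial = {"healthy": 0.6, "sick": 0.4}
    transition = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emission = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Prints the most likely hidden-state path for the observation sequence.
    print(viterbi(observations, states, initial, transition, emission))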
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
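
# Usage sketch (illustration only): this is how a model test suite typically
# wires the tester up inside `setUp`. `BertConfig` stands in for any config
# class; swap in the config under test.
#
# class BertConfigTest(unittest.TestCase):
#     def setUp(self):
#         self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#     def test_config(self):
#         self.config_tester.run_common_tests()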
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
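
# Illustration (not part of the original file): with the lazy module above,
# importing the package is cheap; the torch/flax submodules only load on first
# attribute access.
#
# from transformers.models import beit   # fast: nothing heavy imported yet
# model_cls = beit.BeitModel             # triggers the actual torch-dependent import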
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
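
# Example invocation (illustrative; the script filename is hypothetical):
#   python inference_bf16.py --dpm --steps 30
# `--dpm` swaps in the DPMSolver scheduler; `--steps 30` caps inference steps.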
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
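
# Running these tests (illustrative; the path assumes the usual transformers
# repository layout):
#   python -m pytest tests/models/swinv2/test_modeling_swinv2.py -k "test_config or test_model"
# The `@slow`-marked tests only run when RUN_SLOW=1 is set in the environment.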
from ..utils import DummyObject, requires_backends


# NOTE: the original dummy class name was lost when this file was mangled;
# `SpectrogramDiffusionPipeline` is used here as a best-effort reconstruction,
# since diffusers' note_seq-gated dummy objects have exactly this shape.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
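
# Illustration (not in the original file): dummies like the one above fail
# lazily, so the error surfaces only when the object is actually constructed.
#
# pipe = SpectrogramDiffusionPipeline()
# # -> raises an ImportError naming "transformers", "torch" and "note_seq"
# #    if those backends are not installed.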
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
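
# Usage sketch (illustration only; model download needs network access, so the
# example is left commented out):
#
# from transformers import pipeline
# summarizer = pipeline("summarization")
# print(summarizer("Long article text ...", max_length=60, min_length=10))
# translator = pipeline("translation_en_to_fr")
# print(translator("How old are you?"))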
from datetime import datetime

import matplotlib.pyplot as plt
import torch


# NOTE: the original function names were lost in mangling; the names below are
# reconstructed from what each body does.
def freeze_params(module):
    """Disable gradient updates for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
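
# Minimal demo (added for illustration): freeze a throwaway module and check
# that no parameter requires gradients any more.
if __name__ == "__main__":
    _demo_layer = torch.nn.Linear(4, 2)
    freeze_params(_demo_layer)
    assert all(not p.requires_grad for p in _demo_layer.parameters())
    print(f"[{get_timestamp()}] running on: {get_device()}")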
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan: runs in O(n^2) and returns the MST edges."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap: runs in O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """The doctest body that originally lived here was lost during extraction."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
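
# Worked example (added for illustration): a 4-vertex cycle graph; both
# variants should report the same spanning-tree edges for it.
if __name__ == "__main__":
    demo_graph = [Vertex(i) for i in range(4)]
    connect(demo_graph, 1, 2, 1)
    connect(demo_graph, 2, 3, 2)
    connect(demo_graph, 3, 4, 1)
    connect(demo_graph, 1, 4, 4)
    print(prim(demo_graph, demo_graph[0]))
    print(sorted(prim_heap(demo_graph, demo_graph[0])))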
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
UpperCamelCase__ = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        # `pre_tokenizers` and `processors` are assumed to be imported from `tokenizers` near the top of this file.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
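# Illustrative usage sketch (added; assumes the `transformers` stack above is
# importable and the "roberta-base" checkpoint is reachable -- both are
# assumptions, not part of the original file):
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   ids = tok.convert_tokens_to_ids(tok.tokenize("Hello world"))
#   with_special = tok.build_inputs_with_special_tokens(ids)
#   # RoBERTa does not use token types, so the ids below are all zero:
#   assert set(tok.create_token_type_ids_from_sequences(ids)) == {0}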
| 41 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # input_array.shape[1] is the number of nodes in the input layer;
        # the first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial weights between the first hidden layer (4 nodes)
        # and the second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial weights between the second hidden layer (3 nodes)
        # and the output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network, initially zeros.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        # Layer connecting the input layer with the first hidden layer.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden layer with the second hidden layer.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        # Input values for which the output is to be predicted.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the sigmoid activation function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, given sigmoid output `value`."""
    return (value) * (1 - (value))
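# Quick sanity check for the two helpers above (added illustrative sketch;
# the sample values are arbitrary):
#
#   >>> sigmoid(numpy.array([0.0]))
#   array([0.5])
#   >>> sigmoid_derivative(numpy.array([0.5]))
#   array([0.25])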
def example() -> int:
    # Input values used for training the network (all 3-bit combinations).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Instantiating the neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling the training function.
    # Set give_loss to True to print the loss at every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 41 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
| 39 | '''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
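# Illustrative usage (added sketch; the sample strings are arbitrary):
#
#   >>> hamming_distance("python", "pythen")
#   1
#   >>> hamming_distance("karolin", "kathrin")
#   3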
if __name__ == "__main__":
import doctest
doctest.testmod() | 145 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            # The w2e_/e2w_/e2e_ target names follow the upstream LUKE conversion script.
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
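# For reference, a made-up excerpt of the TSV layout load_entity_vocab expects
# (one "<title>\t<count>" pair per line; the line index becomes the entity id):
#
#   [PAD]\t0
#   [UNK]\t0
#   [MASK]\t0
#   Ana Ivanovic\t27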
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 140 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    # The attribute names below follow transformers' SpeechT5HifiGan module layout.
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
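# Illustrative command line (added sketch; the script name and all file paths
# are hypothetical):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path ./hifigan/generator.ckpt \
#       --stats_path ./hifigan/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan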
| 140 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
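# Illustrative usage sketch (added; assumes the `transformers` stack is
# importable and the "moussaKam/barthez" checkpoint is reachable -- both are
# assumptions):
#
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   ids = tok("Bonjour le monde")["input_ids"]
#   assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id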
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user-profile dict embedded in an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os
    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 80 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
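# Worked example (added sketch): a 3-element list yields all 3! = 6 permutations.
#
#   >>> sorted(heaps([1, 2, 3]))
#   [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]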
if __name__ == "__main__":
A : Dict = input("Enter numbers separated by a comma:\n").strip()
A : Optional[Any] = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 352 | from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
| 305 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout
def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
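# Minimal sketch of the locking pattern these tests exercise (added; the path
# is hypothetical):
#
#   lock = FileLock("/tmp/my_job.lock")
#   with lock.acquire(timeout=5):
#       ...  # critical section; a second holder blocks here or raises Timeout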
| 23 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Return the smallest cuboid size M for which the number of cuboids with an
    integer shortest surface path and sides at most M first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
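# Why the counting works (added explanatory note): for a cuboid with sides
# a <= b <= c, the shortest surface path has length sqrt((a + b)**2 + c**2).
# Fixing c = max_cuboid_size and s = a + b, the path is an integer when
# s**2 + c**2 is a perfect square, and the number of (a, b) pairs with
# a <= b <= c and a + b = s is min(c, s // 2) - max(1, s - c) + 1.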
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    # The argument types below are inferred from the defaults shown in this file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 357 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
    def test_log_warning(self):
        # A ValueError is expected when loading a tokenizer saved in an older, incompatible format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower-cases letters, so this common test does not apply.
        pass
 | 31 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
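# Quick illustration of the helpers under test (added; the values mirror the
# parametrized cases above):
#
#   >>> _distribute_shards(num_shards=10, max_num_jobs=3)
#   [range(0, 4), range(4, 7), range(7, 10)]
#   >>> _split_gen_kwargs({"shards": [0, 1, 2, 3]}, max_num_jobs=2)
#   [{'shards': [0, 1]}, {'shards': [2, 3]}]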
| 147 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0])
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 147 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
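

# Usage sketch (hypothetical call sites, not from this file): check a pinned
# dependency at import time, optionally with an extra hint shown on failure.
# dep_version_check("tqdm")
# dep_version_check("numpy", hint="Try: pip install -U numpy")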
| 328 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-split concatenated text into newline-separated sentences (needed for rougeLsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 328 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a__ = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
a__ = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
a__ = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 235 | """simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt with the cycled key; bail out on the first invalid character."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Collect every all-valid-characters decryption over the 3-letter lowercase keys."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate plaintexts that contain ``common_word``."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Narrow the candidates with common English words, then sum the plaintext's code points."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
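

# Worked example of a single XOR round trip: because XOR is an involution,
# encrypting with a key and re-applying try_key with the same key recovers the
# plaintext (97, 98, 99 are ord("a"), ord("b"), ord("c")).
# ciphertext = [ord(char) ^ key for char, key in zip("the", cycle((97, 98, 99)))]
# try_key(ciphertext, (97, 98, 99))  # -> 'the'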
if __name__ == "__main__":
print(F"""{solution() = }""") | 135 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 358 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render benchmark results (one table per benchmark file) as collapsible markdown."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
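
    # Example of the expected input shape (hypothetical numbers): each benchmark
    # maps metric names to new/old/diff values, and each metric becomes one
    # column of the generated markdown table.
    # {
    #     "benchmarks/array_ops": {
    #         "load_time": {"new": 1.25, "old": 1.50, "diff": -0.25}
    #     }
    # }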
| 120 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase__ = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
UpperCamelCase__ = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCamelCase__ = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
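

# Toy check (illustrative only; trivial because every candidate fits in the
# top 10 here): with identical English/Indic sentence vectors, each query's
# nearest neighbour under cosine distance is itself, so the score is 1.0.
# precision_at_10(np.eye(3), np.eye(3))  # -> 1.0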
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 65 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Megatron-style keys are mapped positionally onto the diffusers state dict.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # this checkpoint is saved as a raw state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 92 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
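
    # A quick illustration of the whitespace round trip used above (assuming the
    # ``translator`` table built in ``__init__``): spaces become \u2582 and
    # newlines \u2583 before SentencePiece sees the text, and _decode maps them
    # back after detokenization.
    # "a b\n".translate(str.maketrans(" \n", "\u2582\u2583"))  # -> 'a\u2582b\u2583'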
| 351 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort: walk forward, step back after each swap."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
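

# Worked trace: gnome_sort([3, 1, 2]) swaps to [1, 3, 2], steps back, advances,
# swaps again to [1, 2, 3], and terminates once the walk reaches the end.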
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 111 | 0 |
import random
def partition(a, left_index, right_index):
    """Lomuto-style partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a, left, right):
    """Quicksort with a uniformly random pivot; sorts a[left:right] in place."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
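

# Deterministic usage sketch: seeding Python's RNG makes the random pivot
# choices (and hence the recursion order) reproducible; the sorted result is
# the same either way.
# random.seed(0)
# data = [5, 3, 8, 1, 9, 2]
# quick_sort_random(data, 0, len(data))
# data  # -> [1, 2, 3, 5, 8, 9]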
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)
if __name__ == "__main__":
main()
| 65 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict)

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
| 65 | 1 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 354 | """simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    # Tally A-Z occurrences, ignoring anything that is not an ASCII letter.
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    # Order the 26 letters from most to least frequent in ``message``.
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Break ties using ETAOIN order so the result is deterministic.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    # Score 0-12: +1 for each of ETAOIN seen among the six most frequent
    # letters, +1 for each of VKJXQZ seen among the six least frequent.
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
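

# Usage sketch: genuine English text yields a frequency order that starts near
# "ETAOIN", which is exactly what english_freq_match_score rewards.
# get_frequency_order("Sample English text goes here")  # -> a 26-letter ordering
# english_freq_match_score("Sample English text goes here")  # -> int in [0, 12]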
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively print the structure (and tensor shapes) of a checkpoint dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV parameter from Megatron's layout to the layout GPT-2 expects."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
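

# Shape walk-through for checkpoint_version >= 2.0 (illustrative sizes, not a
# real checkpoint): a fused QKV weight stored as [heads, splits, head_dim, ...]
# is re-ordered to [splits, heads, head_dim, ...] and flattened back, so the
# outer shape is preserved while the row ordering changes.
# param = torch.randn(2 * 3 * 4, 8)  # num_heads=2, num_splits=3, head_dim=4
# fix_query_key_value_ordering(param, 2.0, 3, 2, 4).shape  # -> torch.Size([24, 8])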
def convert_megatron_checkpoint(args, input_state_dict, config):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
# old versions did not store training args
_lowerCamelCase : Union[str, Any] = input_state_dict.get('''args''', A_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCamelCase : Optional[Any] = ds_args.padded_vocab_size
_lowerCamelCase : List[Any] = ds_args.max_position_embeddings
_lowerCamelCase : Dict = ds_args.hidden_size
_lowerCamelCase : Union[str, Any] = ds_args.num_layers
_lowerCamelCase : Any = ds_args.num_attention_heads
_lowerCamelCase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCamelCase : Optional[int] = config.n_head
# The hidden_size per head.
_lowerCamelCase : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCamelCase : List[Any] = input_state_dict['''checkpoint_version''']
else:
_lowerCamelCase : List[str] = 0.0
# The model.
_lowerCamelCase : Tuple = input_state_dict['''model''']
# The language model.
_lowerCamelCase : Dict = model['''language_model''']
# The embeddings.
_lowerCamelCase : int = lm['''embedding''']
# The word embeddings.
_lowerCamelCase : Optional[int] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
_lowerCamelCase : Dict = word_embeddings[: config.vocab_size, :]
_lowerCamelCase : str = word_embeddings
# The position embeddings.
_lowerCamelCase : Tuple = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCamelCase : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_lowerCamelCase : List[Any] = pos_embeddings
# The transformer.
_lowerCamelCase : str = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
_lowerCamelCase : int = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
_lowerCamelCase : Union[str, Any] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCamelCase : int = layer_re.match(A_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCamelCase : int = int(m.group(1 ) )
# The name of the operation.
_lowerCamelCase : str = m.group(2 )
# Is it a weight or a bias?
_lowerCamelCase : Any = m.group(3 )
# The name of the layer.
_lowerCamelCase : Tuple = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
_lowerCamelCase : List[str] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
_lowerCamelCase : Tuple = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCamelCase : List[Any] = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.floataa ) ).view(
1, 1, A_, A_ )
_lowerCamelCase : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCamelCase : int = torch.tensor(-1E4, dtype=torch.floataa )
_lowerCamelCase : List[str] = masked_bias
_lowerCamelCase : Tuple = fix_query_key_value_ordering(A_, A_, 3, A_, A_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCamelCase : Optional[int] = out_val.transpose(0, 1 ).contiguous()
# Store.
_lowerCamelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCamelCase : Tuple = fix_query_key_value_ordering(A_, A_, 3, A_, A_ )
# Store. No change of shape.
_lowerCamelCase : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCamelCase : Union[str, Any] = megatron_to_transformers[op_name]
_lowerCamelCase : Tuple = val.transpose(0, 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCamelCase : Any = megatron_to_transformers[op_name]
_lowerCamelCase : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCamelCase : Optional[Any] = transformer['''final_layernorm.weight''']
_lowerCamelCase : Optional[Any] = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
_lowerCamelCase : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def main():
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''', action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''', type=A_, help='''Path to the checkpoint file (.zip archive or direct .pt file)''', )
parser.add_argument(
'''--config_file''', default='''''', type=A_, help='''An optional config json file describing the pre-trained model.''', )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
# Extract the basename.
_lowerCamelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint, '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
_lowerCamelCase : Any = torch.load(A_, map_location='''cpu''' )
else:
_lowerCamelCase : Dict = torch.load(args.path_to_checkpoint, map_location='''cpu''' )
_lowerCamelCase : Tuple = input_state_dict.get('''args''', A_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCamelCase : List[str] = '''gelu_fast'''
elif ds_args.openai_gelu:
_lowerCamelCase : List[Any] = '''gelu_new'''
else:
_lowerCamelCase : Optional[Any] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
_lowerCamelCase : List[str] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
_lowerCamelCase : str = GPTaConfig(
vocab_size=5_02_57, n_positions=10_24, n_embd=10_24, n_layer=24, n_head=16, n_inner=40_96, activation_function=A_, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type='''cls_index''', summary_use_proj=A_, summary_activation=A_, summary_proj_to_labels=A_, summary_first_dropout=0.1, scale_attn_weights=A_, use_cache=A_, bos_token_id=5_02_56, eos_token_id=5_02_56, )
else:
_lowerCamelCase : List[Any] = GPTaConfig.from_json_file(args.config_file )
_lowerCamelCase : str = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
_lowerCamelCase : List[Any] = convert_megatron_checkpoint(A_, A_, A_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(A_, A_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_lowerCamelCase : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCamelCase : str = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
_lowerCamelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_lowerCamelCase : Tuple = '''gpt2'''
_lowerCamelCase : str = AutoTokenizer.from_pretrained(A_ )
_lowerCamelCase : Union[str, Any] = type(A_ ).__name__
_lowerCamelCase : Tuple = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(A_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(A_ )
# Store the state_dict to file.
_lowerCamelCase : Any = os.path.join(A_, '''pytorch_model.bin''' )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(A_, A_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
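# Hedged illustration (added for clarity, not part of the original script):
# the core QKV shape fix performed above can be reproduced in isolation.
# Megatron fuses query/key/value into one (3*D) x D matrix, while
# transformers-GPT2 expects D x (3*D). The hidden size D = 4 is arbitrary.
#
#   import torch
#   D = 4
#   megatron_qkv = torch.randn(3 * D, D)                  # Megatron layout
#   gpt2_qkv = megatron_qkv.transpose(0, 1).contiguous()  # transformers layout
#   assert gpt2_qkv.shape == (D, 3 * D)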
| 72 |
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a__ = random_quotes()
pprint.pprint(response)
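# Hedged note (not in the original file): both endpoints return a JSON list of
# quote objects; at the time of writing the keys include "q" (quote text) and
# "a" (author), but the public zenquotes API may change without notice.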
| 317 | 0 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 369 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
'''simple docstring'''
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
'''simple docstring'''
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
'''simple docstring'''
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data( args , infer_input , infer_output , eval_result , idalabel , next_data_dir ):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort("""probability""" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )

    dataset = dataset.remove_columns(["""label""", """probability"""] )
    dataset = dataset.rename_column("""prediction""" , """label""" )
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(next_data_dir , F'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain( model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )

    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase_ : List[Any] = args.train_file
UpperCAmelCase_ : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase_ : Dict = args.eval_file
for key in data_files:
UpperCAmelCase_ : List[str] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
UpperCAmelCase_ : int = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
UpperCAmelCase_ : int = F'''{args.output_dir}/self-train_iter-{{}}'''.format
UpperCAmelCase_ : List[Any] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_a )
os.makedirs(_a , exist_ok=_a )
accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
# Show the progress bar
UpperCAmelCase_ : List[str] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase_ : Any = data_dir_format(_a )
assert os.path.exists(_a )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase_ : List[str] = os.path.join(_a , """stage-1""" )
UpperCAmelCase_ : Optional[int] = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_a , _a ):
arguments_dict.update({key: value} )
UpperCAmelCase_ : Any = os.path.join(_a , """best-checkpoint""" , _a )
if os.path.exists(_a ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _a , _a , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _a )
finetune(**_a )
accelerator.wait_for_everyone()
assert os.path.exists(_a )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _a )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase_ : Dict = os.path.join(_a , """best-checkpoint""" )
UpperCAmelCase_ : str = os.path.join(_a , """stage-2""" )
# Update arguments_dict
UpperCAmelCase_ : Union[str, Any] = model_path
UpperCAmelCase_ : Dict = data_files["""train"""]
UpperCAmelCase_ : List[str] = current_output_dir
UpperCAmelCase_ : str = os.path.join(_a , """best-checkpoint""" , _a )
if os.path.exists(_a ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , _a , _a , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _a )
finetune(**_a )
accelerator.wait_for_everyone()
assert os.path.exists(_a )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _a )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(os.path.join(_a , """best-checkpoint""" ) )
UpperCAmelCase_ : str = config.idalabel
UpperCAmelCase_ : Union[str, Any] = os.path.join(_a , """eval_results_best-checkpoint.json""" )
UpperCAmelCase_ : int = os.path.join(_a , """test_results_best-checkpoint.json""" )
assert os.path.exists(_a )
with open(_a , """r""" ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
UpperCAmelCase_ : Dict = os.path.join(_a , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(_a )
# Loading the dataset from local csv or json files.
UpperCAmelCase_ : Optional[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
UpperCAmelCase_ : List[str] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(_a , exist_ok=_a )
shutil.copy(_a , os.path.join(_a , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(_a ):
shutil.copy(_a , os.path.join(_a , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(_a , _a , _a , _a , _a , _a )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Tuple = os.path.join(_a , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , _a )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_a , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_a , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , )
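# Hedged illustration (added, not part of the original script): the
# confidence-based filtering in `create_pseudo_labeled_data` boils down to
# thresholding and sorting by probability. A minimal standalone sketch with a
# toy `datasets.Dataset`:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"prediction": [0, 1, 1], "probability": [0.9, 0.4, 0.7]})
#   ds = ds.filter(lambda ex: ex["probability"] > 0.5)  # confidence threshold
#   ds = ds.sort("probability", reverse=True)           # most confident first
#   print(ds["prediction"])  # -> [0, 1]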
| 59 | 0 |
import math
def solution(n: int = 100 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
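# Closed-form cross-check (added as an illustration, not in the original):
# the sum of squares is n(n+1)(2n+1)/6 and the square of the sum is
# (n(n+1)/2)**2, so for n = 100 the difference is 25164150.
#
#   def solution_closed_form(n: int = 100) -> int:
#       return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6
#
#   assert solution_closed_form(100) == solution(100) == 25164150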
| 259 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )

    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )


def solution(chain_length: int = 60 , number_limit: int = 1000000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
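# Worked example (illustrative, not in the original): 145 maps to itself since
# 1! + 4! + 5! = 1 + 24 + 120 = 145 (a chain of length 1), while
# 169 -> 363601 -> 1454 -> 169 forms the longest known loop.
#
#   assert digit_factorial_sum(145) == 145
#   assert digit_factorial_sum(169) == 363601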
| 259 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check( pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
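# Hedged usage sketch: `require_version` takes a pip-style requirement string
# plus an optional hint that is appended to the error message on failure, so
# the helper above can be called with any package from the table, e.g.:
#
#   dep_version_check("tqdm", "Try: pip install tqdm --upgrade")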
| 339 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    key: KEY
    val: VAL


class _DeletedItem(_Item ):
    def __init__( self ) -> None:
        super().__init__(None , None )

    def __bool__( self ) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key: KEY ) -> int:
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind: int ) -> int:
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind: int , key: KEY , val: VAL ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size: int ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key: KEY ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key: KEY , val: VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key: KEY , val: VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key: KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key: KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item )

    def __repr__( self ) -> str:
        val_string = ", ".join(
            f'''{item.key}: {item.val}''' for item in self._buckets if item )
        return f'''HashMap({val_string})'''
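# Short usage sketch (illustrative, not part of the original module):
#
#   hm = HashMap()
#   for i in range(10):
#       hm[str(i)] = i          # triggers _size_up once the load factor is hit
#   assert hm["3"] == 3 and len(hm) == 10
#   del hm["3"]
#   assert len(hm) == 9 and "3" not in list(hm)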
| 339 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int , b: int , c: int ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solution_1} and {solution_2}" )


if __name__ == "__main__":
    main()
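# Complex-root check (illustrative): x^2 + 1 = 0 has no real solutions, so the
# function returns the complex pair unchanged.
#
#   assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)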
| 340 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000 ) -> int:
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
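# Sanity check (illustrative): 12 is the smallest perimeter admitting exactly
# one integer-sided right triangle, (3, 4, 5), so:
#
#   assert solution(12) == 1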
| 340 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__a : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a : int = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : Optional[int] = image / 2 + 0.5
if str(__a ).startswith('mps' ):
__a : Dict = torch.manual_seed(__a )
else:
__a : Dict = torch.Generator(device=__a ).manual_seed(__a )
__a : Optional[Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Tuple = self.get_dummy_components()
__a : int = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : int = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : List[str] = output.images
__a : List[str] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Optional[Any] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__a , 'half' ):
__a : Dict = module.half()
__a : Optional[int] = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : int = output.images
__a : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Any = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__a : Optional[int] = init_image.resize((512, 512) )
__a : List[Any] = 'CompVis/stable-diffusion-v1-4'
__a : List[str] = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : Optional[Any] = CycleDiffusionPipeline.from_pretrained(
__a , scheduler=__a , safety_checker=__a , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : int = 'A blue colored car'
__a : int = torch.manual_seed(0 )
__a : List[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : List[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__a : Optional[Any] = init_image.resize((512, 512) )
__a : int = 'CompVis/stable-diffusion-v1-4'
__a : Tuple = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : Tuple = CycleDiffusionPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : Optional[Any] = 'A blue colored car'
__a : Optional[int] = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : Tuple = output.images
assert np.abs(image - expected_image ).max() < 2E-2
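# Hedged usage sketch mirroring the slow tests above (the model id and the
# DDIM scheduler come from this file; `init_image` must be a 512x512 PIL image
# and all generation arguments are illustrative):
#
#   pipe = CycleDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       scheduler=DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler"),
#       safety_checker=None,
#   )
#   out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#              image=init_image, num_inference_steps=100, eta=0.1, strength=0.85,
#              guidance_scale=3, source_guidance_scale=1, output_type="np")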
| 294 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    # Check whether the module was compiled with torch.compile().
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )


def extract_model_from_parallel(model , keep_fpaa_wrapper: bool = True ):
    # Unwrap a model from its distributed / compiled containers.
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model , options ):
        model = model.module

    if not keep_fpaa_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )


@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj ):
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )


def merge_dicts(source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
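# Quick illustration (not in the original module): `merge_dicts` deep-merges
# nested dictionaries into `destination` and returns it.
#
#   base = {"a": 1, "nested": {"x": 1}}
#   merge_dicts({"nested": {"y": 2}}, base)
#   assert base == {"a": 1, "nested": {"x": 1, "y": 2}}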
| 294 | 1 |
def solution(n: int = 1000 ) -> int:
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
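# Known result (illustrative cross-check, not part of the original):
# for n = 1000 the triplet is (200, 375, 425) and the product is 31875000.
#
#   assert solution(1000) == 31875000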
if __name__ == "__main__":
    print(F"{solution() = }")
| 312 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=True , initializer_range=self.initializer_range , )
def _A ( self : int , A : List[Any] , A : Any , A : int , A : Union[str, Any] , A : Dict , A : List[Any] , A : Dict ):
_UpperCAmelCase : List[str] = BioGptModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A )
_UpperCAmelCase : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : List[Any] , A : str , A : List[Any] , A : Dict , A : List[Any] , A : List[str] , A : Union[str, Any] , A : int , A : List[str] , A : Dict , ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : List[Any] , A : str , A : str , A : str , A : Any , A : List[str] , *A : Optional[int] ):
_UpperCAmelCase : str = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
_UpperCAmelCase : Optional[int] = self.seq_length // 2
_UpperCAmelCase : List[Any] = 0
# first forward pass
_UpperCAmelCase , _UpperCAmelCase : List[str] = model(A , attention_mask=A ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCAmelCase : List[str] = ids_tensor((1,) , A ).item() + 1
_UpperCAmelCase : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCAmelCase : Any = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Optional[int] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A )] , dim=1 , )
# get two different outputs
_UpperCAmelCase : List[Any] = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Optional[Any] = model(A , past_key_values=A , attention_mask=A )["last_hidden_state"]
# select random slice
_UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def _A ( self : int , A : Dict , A : str , A : Dict , A : Union[str, Any] , A : Any , *A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = BioGptModel(config=A ).to(A ).eval()
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
# first forward pass
_UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , use_cache=A )
_UpperCAmelCase , _UpperCAmelCase : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Dict = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase : Any = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Dict = model(A , attention_mask=A , past_key_values=A )[
"last_hidden_state"
]
# select random slice
_UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def _A ( self : Optional[Any] , A : Tuple , A : List[str] , A : Tuple , A : Dict , A : List[Any] , *A : Tuple , A : List[str]=False ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase : Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _A ( self : Optional[Any] , A : Any , *A : Optional[Any] ):
_UpperCAmelCase : Tuple = BioGptModel(A )
_UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _A ( self : Optional[int] , A : Dict , A : Tuple , A : Optional[int] , A : int , A : List[str] , *A : Dict ):
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Any = BioGptForTokenClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    def setUp( self ):
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def _A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _A ( self : Any ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*A )
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A , gradient_checkpointing=A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def _A ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
_UpperCAmelCase : Tuple = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : str = "left"
# Define PAD Token = EOS Token = 50256
_UpperCAmelCase : Any = tokenizer.eos_token
_UpperCAmelCase : int = model.config.eos_token_id
# use different length sentences to test batching
_UpperCAmelCase : Any = [
"Hello, my dog is a little",
"Today, I",
]
_UpperCAmelCase : Tuple = tokenizer(A , return_tensors="pt" , padding=A )
_UpperCAmelCase : Optional[Any] = inputs["input_ids"].to(A )
_UpperCAmelCase : Any = model.generate(
input_ids=A , attention_mask=inputs["attention_mask"].to(A ) , )
_UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : List[Any] = model.generate(input_ids=A )
_UpperCAmelCase : List[Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
_UpperCAmelCase : int = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : int = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase : Dict = tokenizer.batch_decode(A , skip_special_tokens=A )
_UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : str = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def _A ( self : str ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def _A ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = 3
_UpperCAmelCase : List[str] = input_dict["input_ids"]
_UpperCAmelCase : Dict = input_ids.ne(1 ).to(A )
_UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase : List[str] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A ( self : int ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = 3
_UpperCAmelCase : Dict = "multi_label_classification"
_UpperCAmelCase : Optional[Any] = input_dict["input_ids"]
_UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(A )
_UpperCAmelCase : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase : Optional[Any] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
_UpperCAmelCase : List[Any] = model(A )[0]
_UpperCAmelCase : int = 42384
_UpperCAmelCase : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A )
_UpperCAmelCase : Any = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1E-4 ) )
@slow
def _A ( self : Any ):
_UpperCAmelCase : str = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : Tuple = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = tokenizer("COVID-19 is" , return_tensors="pt" ).to(A )
_UpperCAmelCase : Dict = model.generate(
**A , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=A , )
_UpperCAmelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
_UpperCAmelCase : List[str] = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(A , A )
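# Hedged usage sketch condensing the integration test above (the model id and
# generation settings are taken from the test itself):
#
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   output_ids = model.generate(**inputs, min_length=100, max_length=1024, num_beams=5, early_stopping=True)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))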
| 31 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class _a ( _lowerCAmelCase ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = RoFormerTokenizer
def __init__( self : Union[str, Any], lowerCAmelCase__ : Union[str, Any]=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Dict=True, lowerCAmelCase__ : str="[UNK]", lowerCAmelCase__ : Optional[int]="[SEP]", lowerCAmelCase__ : List[str]="[PAD]", lowerCAmelCase__ : Optional[int]="[CLS]", lowerCAmelCase__ : int="[MASK]", lowerCAmelCase__ : str=True, lowerCAmelCase__ : Any=None, **lowerCAmelCase__ : Any, ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenize_chinese_chars=lowerCAmelCase__, strip_accents=lowerCAmelCase__, **lowerCAmelCase__, )
_UpperCamelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''', lowerCAmelCase__ ) != do_lower_case
or pre_tok_state.get('''strip_accents''', lowerCAmelCase__ ) != strip_accents
):
_UpperCamelCase : Tuple = getattr(lowerCAmelCase__, pre_tok_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : List[Any] = strip_accents
_UpperCamelCase : Dict = pre_tok_class(**lowerCAmelCase__ )
_UpperCamelCase : Dict = do_lower_case
def __getstate__( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self : List[Any], lowerCAmelCase__ : List[str] ) -> Any:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = d
_UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
_UpperCamelCase : Tuple = PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase__ ) )
def snake_case ( self : Any, lowerCAmelCase__ : Dict, lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self : Union[str, Any], lowerCAmelCase__ : List[int], lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self : str, lowerCAmelCase__ : str, lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def snake_case ( self : Optional[Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Tuple=None, lowerCAmelCase__ : List[Any]=None, lowerCAmelCase__ : Union[str, Any]=False, **lowerCAmelCase__ : List[Any], ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = BertPreTokenizer()
return super().save_pretrained(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__ )
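# Illustration added for clarity (a hedged sketch, not part of the original file):
# the two special-token methods above produce the standard BERT-style [CLS]/[SEP]
# layout. The token ids below are made up for the example.
_cls, _sep = 101, 102  # hypothetical ids for [CLS] and [SEP]
_single = [_cls] + [7, 8, 9] + [_sep]  # [CLS] A [SEP]
_pair = _single + [10, 11] + [_sep]  # [CLS] A [SEP] B [SEP]
_token_type_ids = [0] * len(_single) + [1] * (len(_pair) - len(_single))
assert _token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]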
| 128 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ =0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _a :
def __init__( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase : str = WATERMARK_BITS
_UpperCamelCase : Optional[int] = WatermarkEncoder()
self.encoder.set_watermark('''bits''', self.watermark )
def snake_case ( self : Dict, lowerCAmelCase__ : torch.FloatTensor ) -> int:
'''simple docstring'''
if images.shape[-1] < 2_5_6:
return images
_UpperCamelCase : Union[str, Any] = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1 ).float().numpy()
_UpperCamelCase : List[str] = [self.encoder.encode(lowerCAmelCase__, '''dwtDct''' ) for image in images]
_UpperCamelCase : Dict = torch.from_numpy(np.array(lowerCAmelCase__ ) ).permute(0, 3, 1, 2 )
_UpperCamelCase : Optional[int] = torch.clamp(2 * (images / 2_5_5 - 0.5), min=-1.0, max=1.0 )
return images
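# Hedged usage sketch (not in the original file). Assumptions: the class above is
# diffusers' StableDiffusionXLWatermarker, renamed to `_a` by the style pass, and
# its encode method (renamed to `snake_case`) expects a float NCHW batch in [-1, 1]
# with spatial size of at least 256; imwatermark must be installed.
if __name__ == "__main__":
    demo_images = torch.rand(1, 3, 512, 512) * 2 - 1  # fake batch in [-1, 1]
    watermarker = _a()
    watermarked = watermarker.snake_case(demo_images)
    print(watermarked.shape)  # torch.Size([1, 3, 512, 512])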
| 128 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
A__ = tmp_path / '''file.csv'''
A__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = tmp_path / '''malformed_file.csv'''
A__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = tmp_path / '''csv_with_image.csv'''
A__ = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = tmp_path / '''csv_with_label.csv'''
A__ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = tmp_path / '''csv_with_int_list.csv'''
A__ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
A__ = Csv()
A__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowercase_ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(lowercase_ ) in record.message
for record in caplog.records )
@require_pil
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
with open(lowercase_ , encoding='''utf-8''' ) as f:
A__ = f.read().splitlines()[1]
A__ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
A__ = csv._generate_tables([[csv_file_with_image]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
A__ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
with open(lowercase_ , encoding='''utf-8''' ) as f:
A__ = f.read().splitlines()[1:]
A__ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
A__ = csv._generate_tables([[csv_file_with_label]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
A__ = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
A__ = csv._generate_tables([[csv_file_with_int_list]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
A__ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
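# Hedged side note (not in the original file): datasets' Csv builder forwards
# `converters` to pandas.read_csv, so the int-list parsing tested above can be
# reproduced with pandas directly.
import io as _io

import pandas as _pd

_df = _pd.read_csv(
    _io.StringIO("int_list\n1 2 3\n4 5 6\n"),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
print(_df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]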
| 14 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
A__ = args.pruning_method
A__ = args.threshold
A__ = args.model_name_or_path.rstrip('''/''' )
A__ = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
A__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(lowercase_ )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
A__ = os.path.join(
os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )
if not os.path.isdir(lowercase_ ):
shutil.copytree(lowercase_ , lowercase_ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
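# Hedged sketch (not in the original script) of what TopKBinarizer.apply computes
# above for movement pruning: keep the `threshold` fraction of weights whose
# learned scores are highest, zero out the rest.
def _topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    mask = torch.zeros_like(scores)
    k = max(1, int(threshold * scores.numel()))
    _, idx = scores.flatten().topk(k)
    mask.view(-1)[idx] = 1.0
    return mask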
| 14 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
def __init__( self : Optional[int] , a : Dict , a : Tuple=13 , a : Union[str, Any]=30 , a : int=2 , a : Dict=3 , a : Optional[int]=True , a : Dict=True , a : Union[str, Any]=32 , a : List[Any]=5 , a : str=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Optional[int]=0.1 , a : int=0.1 , a : List[str]=10 , a : str=0.0_2 , a : Union[str, Any]=None , a : Optional[int]=2 , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Any = scope
lowerCAmelCase__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Any = (image_size // patch_size) ** 2
lowerCAmelCase__ : Optional[int] = num_patches + 1
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[Any] , a : int , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ViTModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Optional[int] , a : Any , a : Dict , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : List[str] , a : Tuple , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase__ : Union[str, Any] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : List[str] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = ViTModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class(a )
lowerCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[Any] = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[str] = prepare_img()
lowerCAmelCase__ : Dict = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**a )
# verify the logits
lowerCAmelCase__ : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowerCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : List[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : Optional[int] = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(a , interpolate_pos_encoding=a )
# verify the logits
lowerCAmelCase__ : str = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowerCAmelCase__ : int = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowerCAmelCase__ : str = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Optional[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : str = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(a )
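# Hedged arithmetic check (not in the original tests): a ViT sequence is
# (image_size // patch_size) ** 2 patches plus one [CLS] token, which is where
# the shapes asserted above come from.
def _vit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert _vit_seq_length(30, 2) == 226  # the tester defaults above
assert _vit_seq_length(480, 8) == 3601  # dino-vits8 at 480 px, matching (1, 3_601, 384)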
| 353 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 0 |
"""simple docstring"""
import math
def lowerCamelCase__ ( _lowerCamelCase : int ) -> str:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
while num > 0:
lowerCamelCase_ = num % 8
lowerCamelCase_ = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
lowerCamelCase_ = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F'''0o{int(octal )}'''
def lowerCamelCase__ ( ) -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
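# Hedged cross-check (not in the original file): Python's built-in oct() agrees
# with the manual conversion above for non-negative integers.
assert oct(65) == "0o101"
assert oct(216) == "0o330"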
| 183 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = parent
__snake_case : Tuple = batch_size
__snake_case : List[str] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : int = use_attention_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : List[str] = vocab_size
__snake_case : int = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : List[Any] = type_vocab_size
__snake_case : int = type_sequence_label_size
__snake_case : Dict = initializer_range
__snake_case : List[Any] = num_choices
__snake_case : Union[str, Any] = rescale_embeddings
__snake_case : List[Any] = attention_type
__snake_case : str = use_bias
__snake_case : Dict = block_size
__snake_case : Optional[Any] = num_random_blocks
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_attention_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Union[str, Any] = None
if self.use_token_type_ids:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[int] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs
__snake_case : int = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class _lowerCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase_ : Dict =False
UpperCAmelCase_ : str =False
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
with self.subTest("JIT Enabled" ):
__snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int:
'''simple docstring'''
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
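# Hedged mini-example (not in the original tests) of the JIT-enabled vs.
# JIT-disabled comparison pattern used in the common test above.
def _demo_jit_vs_eager():
    import jax.numpy as jnp

    @jax.jit
    def double(x):
        return x * 2

    x = jnp.ones((2, 2))
    jitted = double(x)
    with jax.disable_jit():
        eager = double(x)
    assert jitted.shape == eager.shape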
| 326 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( lowerCamelCase ):
lowercase = (DDPMScheduler,)
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def A__ ( self ) -> Dict:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Any:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> int:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
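# Hedged sketch (not in the original tests) of the "fixed_small" variance asserted
# in the variance test above: beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# with the linear beta schedule from the config (0.0001 -> 0.02 over 1000 steps).
def _ddpm_fixed_small_variance(t: int, num_steps: int = 1000) -> float:
    betas = torch.linspace(0.0001, 0.02, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return float((1 - alpha_bar_prev) / (1 - alphas_cumprod[t]) * betas[t])

# _ddpm_fixed_small_variance(0) is 0.0 and _ddpm_fixed_small_variance(487) lands
# close to the 0.00979 checked above.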
| 183 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger()
@dataclass
class a_ :
lowercase = 42
lowercase = field(default_factory=lowerCamelCase )
lowercase = field(default_factory=lowerCamelCase )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = len(list(m.modules() ) ) == 1 or isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_SCREAMING_SNAKE_CASE )
[x.remove() for x in self.handles]
return self
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
return list(filter(lambda _SCREAMING_SNAKE_CASE : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a_ :
lowercase = 42
lowercase = 42
lowercase = 0
lowercase = field(default_factory=lowerCamelCase )
lowercase = field(default_factory=lowerCamelCase )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Tracker(self.dest )(_SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase = Tracker(self.src )(_SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.src_skip , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.dest_skip , _SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise Exception(
F"Numbers of operations are different. Source module has {len(_SCREAMING_SNAKE_CASE )} operations while"
F" destination module has {len(_SCREAMING_SNAKE_CASE )}." )
for dest_m, src_m in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True )-> Optional[Any]:
print(F"Converting {name}..." )
with torch.no_grad():
UpperCamelCase = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase ).eval()
UpperCamelCase = ResNetForImageClassification(__UpperCamelCase ).eval()
UpperCamelCase = ModuleTransfer(src=__UpperCamelCase , dest=__UpperCamelCase )
UpperCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCamelCase )
assert torch.allclose(from_model(__UpperCamelCase ) , our_model(__UpperCamelCase ).logits ), "The model logits don't match the original one."
UpperCamelCase = F"resnet{'-'.join(name.split('resnet' ) )}"
print(__UpperCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__UpperCamelCase , )
# we can use the convnext one
UpperCamelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__UpperCamelCase , )
print(F"Pushed {checkpoint_name}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True )-> str:
UpperCamelCase = """imagenet-1k-id2label.json"""
UpperCamelCase = 1000
UpperCamelCase = (1, num_labels)
UpperCamelCase = """huggingface/label-files"""
UpperCamelCase = num_labels
UpperCamelCase = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = partial(__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
UpperCamelCase = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__UpperCamelCase , names_to_config[model_name] , __UpperCamelCase , __UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
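# Hedged mini-example (not in the original script) of the Tracker pattern above:
# record every leaf module that fires during a forward pass via
# register_forward_hook, then detach the hooks.
def _trace_leaves(model: nn.Module, x: Tensor) -> list:
    traced = []
    handles = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0
    ]
    model(x)
    [h.remove() for h in handles]
    return traced

# e.g. _trace_leaves(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), torch.randn(1, 3, 8, 8))
# returns the Conv2d and ReLU instances in firing order.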
| 183 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = EfficientNetConfig()
UpperCAmelCase_ = CONFIG_MAP[model_name]["hidden_dim"]
UpperCAmelCase_ = CONFIG_MAP[model_name]["width_coef"]
UpperCAmelCase_ = CONFIG_MAP[model_name]["depth_coef"]
UpperCAmelCase_ = CONFIG_MAP[model_name]["image_size"]
UpperCAmelCase_ = CONFIG_MAP[model_name]["dropout_rate"]
UpperCAmelCase_ = CONFIG_MAP[model_name]["dw_padding"]
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = 10_00
UpperCAmelCase_ = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = CONFIG_MAP[model_name]["image_size"]
UpperCAmelCase_ = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=__lowerCamelCase , )
return preprocessor
def lowerCAmelCase_ ( snake_case_ : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
UpperCAmelCase_ = sorted(set(__lowerCamelCase ) )
UpperCAmelCase_ = len(__lowerCamelCase )
UpperCAmelCase_ = {b: str(__lowerCamelCase ) for b, i in zip(__lowerCamelCase , range(__lowerCamelCase ) )}
UpperCAmelCase_ = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
UpperCAmelCase_ = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
UpperCAmelCase_ = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCAmelCase_ = "efficientnet." + item[1]
UpperCAmelCase_ = "classifier.weight"
UpperCAmelCase_ = "classifier.bias"
return key_mapping
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCAmelCase_ = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCAmelCase_ = torch.from_numpy(__lowerCamelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCAmelCase_ = torch.from_numpy(__lowerCamelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCAmelCase_ = torch.from_numpy(np.transpose(__lowerCamelCase ) )
else:
UpperCAmelCase_ = torch.from_numpy(__lowerCamelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowerCamelCase )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = model_classes[model_name](
include_top=__lowerCamelCase , weights="imagenet" , input_tensor=__lowerCamelCase , input_shape=__lowerCamelCase , pooling=__lowerCamelCase , classes=10_00 , classifier_activation="softmax" , )
UpperCAmelCase_ = original_model.trainable_variables
UpperCAmelCase_ = original_model.non_trainable_variables
UpperCAmelCase_ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCAmelCase_ = param.numpy()
UpperCAmelCase_ = list(tf_params.keys() )
# Load HuggingFace model
UpperCAmelCase_ = get_efficientnet_config(__lowerCamelCase )
UpperCAmelCase_ = EfficientNetForImageClassification(__lowerCamelCase ).eval()
UpperCAmelCase_ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
UpperCAmelCase_ = rename_keys(__lowerCamelCase )
replace_params(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Initialize preprocessor and preprocess input image
UpperCAmelCase_ = convert_image_processor(__lowerCamelCase )
UpperCAmelCase_ = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCAmelCase_ = hf_model(**__lowerCamelCase )
UpperCAmelCase_ = outputs.logits.detach().numpy()
# Original model inference
UpperCAmelCase_ = False
UpperCAmelCase_ = CONFIG_MAP[model_name]["image_size"]
UpperCAmelCase_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCAmelCase_ = image.img_to_array(__lowerCamelCase )
UpperCAmelCase_ = np.expand_dims(__lowerCamelCase , axis=0 )
UpperCAmelCase_ = original_model.predict(__lowerCamelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowerCamelCase ):
os.mkdir(__lowerCamelCase )
# Save converted model and image processor
hf_model.save_pretrained(__lowerCamelCase )
preprocessor.save_pretrained(__lowerCamelCase )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
UpperCAmelCase_ = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowerCamelCase )
hf_model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
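# Hedged shape check (not in the original script) for the TF -> PyTorch conv kernel
# permutation in replace_params: TF stores conv kernels as (H, W, C_in, C_out),
# while PyTorch expects (C_out, C_in, H, W).
def _tf_kernel_to_pt(kernel: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(kernel).permute(3, 2, 0, 1)

assert tuple(_tf_kernel_to_pt(np.zeros((3, 3, 16, 32), dtype=np.float32)).shape) == (32, 16, 3, 3)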
| 1 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
UpperCAmelCase__ = '1'
UpperCAmelCase__ = '0'
UpperCAmelCase__ = '1'
UpperCAmelCase__ = ort.SessionOptions()
UpperCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
UpperCAmelCase__ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
UpperCAmelCase__ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
UpperCAmelCase__ = ort.RunOptions()
UpperCAmelCase__ = 128
UpperCAmelCase__ = 1
UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = 2000
UpperCAmelCase__ = {}
for iter in range(max_iters):
UpperCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
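# Hedged follow-up (not in the original script), reusing the `sess`, `start_time`
# and `max_iters` names that the calls above already reference: confirm which
# execution provider was actually selected (onnxruntime silently falls back when
# TensorRT/CUDA are unavailable) and report throughput alongside latency.
print('Active providers:', sess.get_providers())
_avg_ms = (time.time() - start_time) * 1000 / max_iters
print('Throughput = {:.1f} sequences/s'.format(1000 / _avg_ms))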
| 288 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any]=0 )-> Dict:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 128, 128) , rng=random.Random(a ) )
lowercase__ = np.random.RandomState(a )
lowercase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=a )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
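# The assertions above set the pattern used by every test in this class: take a
# small deterministic corner slice of the generated image and compare it against
# hard-coded reference values with a loose tolerance, which catches gross
# regressions without storing full reference images.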
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
# warmup pass to apply optimizations
lowercase__ = pipe(**self.get_dummy_inputs() )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**a ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase__ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
"""simple docstring"""
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ = init_image.resize((768, 512) )
# using the PNDM scheduler by default
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'A fantasy landscape, trending on artstation'
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type='np' , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase__ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ = init_image.resize((768, 512) )
lowercase__ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowercase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'A fantasy landscape, trending on artstation'
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type='np' , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase__ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
lowercase__ = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , _SCREAMING_SNAKE_CASE )
if matches:
lowercase__ = float(matches[1] )
lowercase__ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowercase__ = 1001
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 'huggingface/label-files'
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
lowercase__ = 'background'
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
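# Worked example of the regex above: "mobilenet_v1_0.75_192" parses into a depth
# multiplier of 0.75 and an image size of 192; a name that does not match leaves
# the config defaults untouched.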
def __UpperCamelCase () -> int:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
lowercase__ = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
lowercase__ = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowercase__ = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ = model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
lowercase__ = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
lowercase__ = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
lowercase__ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print('Pushing to the hub...' )
lowercase__ = 'google/' + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
from __future__ import annotations
def UpperCamelCase( __UpperCamelCase : Optional[Any] ):
lowerCAmelCase_ : int = [True] * limit
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
lowerCAmelCase_ : Any = i * 2
while index < limit:
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Any = index + i
lowerCAmelCase_ : str = [2]
for i in range(3 ,__lowerCAmelCase ,2 ):
if is_prime[i]:
primes.append(__lowerCAmelCase )
return primes
def UpperCamelCase( __UpperCamelCase : Optional[int] = 1000000 ):
lowerCAmelCase_ : Tuple = prime_sieve(__lowerCAmelCase )
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : List[str] = 0
for i in range(len(__lowerCAmelCase ) ):
for j in range(i + length ,len(__lowerCAmelCase ) ):
lowerCAmelCase_ : Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCAmelCase_ : Optional[Any] = j - i
lowerCAmelCase_ : Optional[int] = sol
return largest
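# Hand-checkable instance of the search above: 41 = 2 + 3 + 5 + 7 + 11 + 13 is
# prime and is the sum of six consecutive primes, so with a ceiling of 42 the
# expected result is 41.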
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
def __lowerCAmelCase ():
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
lowerCamelCase__ = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __lowerCAmelCase (__lowerCAmelCase ):
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Any = 0
_UpperCAmelCase : str = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCAmelCase : Union[str, Any] = (left + right) // 2
_UpperCAmelCase : List[str] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCAmelCase : Tuple = mid + 1
else:
_UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
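# Worked example for the binary search above (the helper is referred to as
# find_negative_index in the call below): in the reverse-sorted row
# [4, 3, -1, -2] the first negative value sits at index 2, so the row
# contributes len(row) - 2 = 2 negatives.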
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
_UpperCAmelCase : Dict = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def __lowerCAmelCase (__lowerCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
def __lowerCAmelCase ():
from timeit import timeit
print("Running benchmarks" )
_UpperCAmelCase : Tuple = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCAmelCase : str = timeit(F"""{func}(grid=grid)""" , setup=__lowerCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from ....utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
class A ( _a ):
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=20_48 ) -> Dict:
"""simple docstring"""
_a = config.__dict__
_a = modal_hidden_size
if num_labels:
_a = num_labels
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A ( _a ):
lowercase_ = 42
lowercase_ = jnp.floataa
lowercase_ = True
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
super().setup()
_a = nn.Dense(5 , dtype=self.dtype )
def __call__( self : List[Any] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
_a = super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A ( _a ):
lowercase_ = FlaxBigBirdForNaturalQuestionsModule
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
def cross_entropy(UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str]=None ):
_a = logits.shape[-1]
_a = (labels[..., None] == jnp.arange(UpperCamelCase )[None]).astype('''f4''' )
_a = jax.nn.log_softmax(UpperCamelCase , axis=-1 )
_a = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_a = reduction(UpperCamelCase )
return loss
_a = partial(UpperCamelCase , reduction=jnp.mean )
_a = cross_entropy(UpperCamelCase , UpperCamelCase )
_a = cross_entropy(UpperCamelCase , UpperCamelCase )
_a = cross_entropy(UpperCamelCase , UpperCamelCase )
return (start_loss + end_loss + pooled_loss) / 3
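# Sanity check of the inner cross_entropy above, verifiable by hand: with logits
# [0, 0] and label 0, log_softmax gives [-log 2, -log 2], so the per-example
# loss is log 2 ~= 0.6931; the start, end and pooled losses are then averaged
# with equal weight.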
@dataclass
class A :
lowercase_ = "google/bigbird-roberta-base"
lowercase_ = 3000
lowercase_ = 1_0500
lowercase_ = 128
lowercase_ = 3
lowercase_ = 1
lowercase_ = 5
# tx_args
lowercase_ = 3e-5
lowercase_ = 0.0
lowercase_ = 2_0000
lowercase_ = 0.0095
lowercase_ = "bigbird-roberta-natural-questions"
lowercase_ = "training-expt"
lowercase_ = "data/nq-training.jsonl"
lowercase_ = "data/nq-validation.jsonl"
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=lowerCAmelCase_ )
_a = os.path.join(self.base_dir , self.save_dir )
_a = self.batch_size_per_device * jax.device_count()
@dataclass
class A :
lowercase_ = 42
lowercase_ = 4096 # no dynamic padding on TPUs
def __call__( self : str , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = self.collate_fn(lowerCAmelCase_ )
_a = jax.tree_util.tree_map(lowerCAmelCase_ , lowerCAmelCase_ )
return batch
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
_a , _a = self.fetch_inputs(features['''input_ids'''] )
_a = {
'''input_ids''': jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : list ) -> List[Any]:
"""simple docstring"""
_a = [self._fetch_inputs(lowerCAmelCase_ ) for ids in input_ids]
return zip(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : list ) -> str:
"""simple docstring"""
_a = [1 for _ in range(len(lowerCAmelCase_ ) )]
while len(lowerCAmelCase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Dict=None ):
'''simple docstring'''
if seed is not None:
_a = dataset.shuffle(seed=UpperCamelCase )
for i in range(len(UpperCamelCase ) // batch_size ):
_a = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase )
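# Usage sketch for the generator above (the dataset layout is an assumption: an
# object with __len__, shuffle(seed=...) and integer-slice indexing, as with a
# datasets.Dataset):
#   for batch in get_batched_dataset(train_dataset, batch_size=32, seed=epoch):
#       ...  # each batch is a dict of column name -> list of values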
@partial(jax.pmap , axis_name='''batch''' )
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , **UpperCamelCase : str ):
'''simple docstring'''
def loss_fn(UpperCamelCase : List[str] ):
_a = model_inputs.pop('''start_labels''' )
_a = model_inputs.pop('''end_labels''' )
_a = model_inputs.pop('''pooled_labels''' )
_a = state.apply_fn(**UpperCamelCase , params=UpperCamelCase , dropout_rng=UpperCamelCase , train=UpperCamelCase )
_a , _a , _a = outputs
return state.loss_fn(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
_a , _a = jax.random.split(UpperCamelCase )
_a = jax.value_and_grad(UpperCamelCase )
_a , _a = grad_fn(state.params )
_a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
_a = jax.lax.pmean(UpperCamelCase , '''batch''' )
_a = state.apply_gradients(grads=UpperCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def snake_case_ (UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = model_inputs.pop('''start_labels''' )
_a = model_inputs.pop('''end_labels''' )
_a = model_inputs.pop('''pooled_labels''' )
_a = state.apply_fn(**UpperCamelCase , params=state.params , train=UpperCamelCase )
_a , _a , _a = outputs
_a = state.loss_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class A ( train_state.TrainState ):
lowercase_ = struct.field(pytree_node=_a )
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=None ) -> List[str]:
"""simple docstring"""
_a = model.params
_a = TrainState.create(
apply_fn=model.__call__ , params=lowerCAmelCase_ , tx=lowerCAmelCase_ , loss_fn=lowerCAmelCase_ , )
if ckpt_dir is not None:
_a , _a , _a , _a , _a = restore_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
_a , _a = build_tx(**lowerCAmelCase_ )
_a = train_state.TrainState(
step=lowerCAmelCase_ , apply_fn=model.__call__ , params=lowerCAmelCase_ , tx=lowerCAmelCase_ , opt_state=lowerCAmelCase_ , )
_a = args
_a = data_collator
_a = lr
_a = params
_a = jax_utils.replicate(lowerCAmelCase_ )
return state
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> int:
"""simple docstring"""
_a = self.args
_a = len(lowerCAmelCase_ ) // args.batch_size
_a = jax.random.PRNGKey(0 )
_a = jax.random.split(lowerCAmelCase_ , jax.device_count() )
for epoch in range(args.max_epochs ):
_a = jnp.array(0 , dtype=jnp.floataa )
_a = get_batched_dataset(lowerCAmelCase_ , args.batch_size , seed=lowerCAmelCase_ )
_a = 0
for batch in tqdm(lowerCAmelCase_ , total=lowerCAmelCase_ , desc=F'Running EPOCH-{epoch}' ):
_a = self.data_collator(lowerCAmelCase_ )
_a , _a , _a = self.train_step_fn(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
_a = jax_utils.unreplicate(state.step )
_a = running_loss.item() / i
_a = self.scheduler_fn(state_step - 1 )
_a = self.evaluate(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowerCAmelCase_ ) )
self.logger.log(lowerCAmelCase_ , commit=lowerCAmelCase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
_a = get_batched_dataset(lowerCAmelCase_ , self.args.batch_size )
_a = len(lowerCAmelCase_ ) // self.args.batch_size
_a = jnp.array(0 , dtype=jnp.floataa )
_a = 0
for batch in tqdm(lowerCAmelCase_ , total=lowerCAmelCase_ , desc='''Evaluating ... ''' ):
_a = self.data_collator(lowerCAmelCase_ )
_a = self.val_step_fn(lowerCAmelCase_ , **lowerCAmelCase_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ) -> int:
"""simple docstring"""
_a = jax_utils.unreplicate(lowerCAmelCase_ )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=''' ... ''' )
self.model_save_fn(lowerCAmelCase_ , params=state.params )
with open(os.path.join(lowerCAmelCase_ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCAmelCase_ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(lowerCAmelCase_ , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCAmelCase_ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , lowerCAmelCase_ )
print('''DONE''' )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=''' ... ''' )
with open(os.path.join(UpperCamelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f:
_a = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f:
_a = from_bytes(state.opt_state , f.read() )
_a = joblib.load(os.path.join(UpperCamelCase , '''args.joblib''' ) )
_a = joblib.load(os.path.join(UpperCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(UpperCamelCase , '''training_state.json''' ) , '''r''' ) as f:
_a = json.load(UpperCamelCase )
_a = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = num_train_steps - warmup_steps
_a = optax.linear_schedule(init_value=UpperCamelCase , end_value=UpperCamelCase , transition_steps=UpperCamelCase )
_a = optax.linear_schedule(init_value=UpperCamelCase , end_value=1e-7 , transition_steps=UpperCamelCase )
_a = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
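# Shape of the schedule assembled above: a linear ramp between the initial and
# peak learning rates over the warmup window, joined at the warmup boundary to a
# linear decay down to 1e-7 over the remaining training steps. The positional
# argument order of this helper is an assumption.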
def snake_case_ (UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
def weight_decay_mask(UpperCamelCase : Dict ):
_a = traverse_util.flatten_dict(UpperCamelCase )
_a = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCamelCase )
_a = scheduler_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = optax.adamw(learning_rate=UpperCamelCase , weight_decay=UpperCamelCase , mask=UpperCamelCase )
return tx, lr
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Tuple) -> List[str]:
_A = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
_A = DetaConfig(
backbone_config=snake_case__ , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=snake_case__ , with_box_refine=snake_case__ , two_stage=snake_case__ , )
# set labels
_A = """huggingface/label-files"""
if "o365" in model_name:
_A = 366
_A = """object365-id2label.json"""
else:
_A = 91
_A = """coco-detection-id2label.json"""
_A = num_labels
_A = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type="""dataset""")) , """r"""))
_A = {int(snake_case__): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
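# Label-map sketch for the function above: both variants pull an id2label json
# from the huggingface/label-files dataset repo, giving 91 COCO detection
# classes for the base checkpoint and 366 classes for the Objects365-pretrained
# one.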
def snake_case ( snake_case__ :Tuple) -> Any:
_A = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias"""))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight"""))
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias"""))
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight"""))
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias"""))
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight"""))
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias"""))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias'''))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias'''))
# fmt: on
return rename_keys
def snake_case ( snake_case__ :Any , snake_case__ :Union[str, Any] , snake_case__ :Dict) -> Any:
_A = dct.pop(snake_case__)
_A = val
def snake_case ( snake_case__ :int , snake_case__ :Union[str, Any]) -> Tuple:
_A = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
_A = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_A = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
_A = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:dim, :]
_A = in_proj_bias[: dim]
_A = in_proj_weight[
dim : dim * 2, :
]
_A = in_proj_bias[
dim : dim * 2
]
_A = in_proj_weight[
-dim :, :
]
_A = in_proj_bias[-dim :]
# fmt: on
def snake_case ( snake_case__ :List[Any] , snake_case__ :int) -> Optional[int]:
# transformer decoder self-attention layers
_A = config.d_model
for i in range(config.decoder_layers):
# read in weights + bias of input projection layer of self-attention
_A = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
_A = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:hidden_size, :]
_A = in_proj_bias[:hidden_size]
_A = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_A = in_proj_bias[hidden_size : hidden_size * 2]
_A = in_proj_weight[-hidden_size:, :]
_A = in_proj_bias[-hidden_size:]
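# The split performed by the two helpers above, stated once: a fused
# in-projection weight of shape (3 * d, d) is cut row-wise into query rows
# [0:d], key rows [d:2d] and value rows [2d:3d], and the fused bias of length
# 3 * d is cut the same way.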
def snake_case ( ) -> Union[str, Any]:
_A = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw)
return im
@torch.no_grad()
def snake_case ( snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Optional[int]) -> Union[str, Any]:
_A = get_deta_config(snake_case__)
# load original state dict
if model_name == "deta-swin-large":
_A = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""")
elif model_name == "deta-swin-large-o365":
_A = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""")
else:
raise ValueError(F'''Model name {model_name} not supported''')
_A = torch.load(snake_case__ , map_location="""cpu""")["""model"""]
# original state dict
for name, param in state_dict.items():
print(snake_case__ , param.shape)
# rename keys
_A = create_rename_keys(snake_case__)
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__)
read_in_swin_q_k_v(snake_case__ , config.backbone_config)
read_in_decoder_q_k_v(snake_case__ , snake_case__)
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_A = state_dict.pop(snake_case__)
_A = val
if "input_proj" in key:
_A = state_dict.pop(snake_case__)
_A = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_A = state_dict.pop(snake_case__)
_A = val
# finally, create HuggingFace model and load state dict
_A = DetaForObjectDetection(snake_case__)
model.load_state_dict(snake_case__)
model.eval()
_A = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(snake_case__)
# load image processor
_A = DetaImageProcessor(format="""coco_detection""")
# verify our conversion on image
_A = prepare_img()
_A = processor(images=snake_case__ , return_tensors="""pt""")
_A = encoding["""pixel_values"""]
_A = model(pixel_values.to(snake_case__))
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3])
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3])
if model_name == "deta-swin-large":
_A = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
_A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
elif model_name == "deta-swin-large-o365":
_A = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
_A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__) , atol=1E-4)
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__) , atol=1E-4)
print("""Everything ok!""")
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
processor.save_pretrained(snake_case__)
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""")
model.push_to_hub(F'''jozhang97/{model_name}''')
processor.push_to_hub(F'''jozhang97/{model_name}''')
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[Any] = '''bridgetower_vision_model'''
def __init__( self , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=3 , lowerCAmelCase_=16 , lowerCAmelCase_=2_88 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-05 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Any:
super().__init__(**lowerCAmelCase_ )
_A = hidden_size
_A = num_hidden_layers
_A = num_channels
_A = patch_size
_A = image_size
_A = initializer_factor
_A = layer_norm_eps
_A = stop_gradient
_A = share_layernorm
_A = remove_last_layer
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig":
_A , _A = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_A = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = '''bridgetower_text_model'''
def __init__( self , lowerCAmelCase_=5_02_65 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=1 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_14 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-05 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase_ )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = initializer_factor
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = pad_token_id
_A = bos_token_id
_A = eos_token_id
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig":
_A , _A = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_A = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = '''bridgetower'''
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=7_68 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-05 , lowerCAmelCase_=False , lowerCAmelCase_="add" , lowerCAmelCase_=12 , lowerCAmelCase_=6 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> int:
# TODO: remove this once the Hub files are updated.
_A = kwargs.pop("""text_config_dict""" , lowerCAmelCase_ )
_A = kwargs.pop("""vision_config_dict""" , lowerCAmelCase_ )
super().__init__(**lowerCAmelCase_ )
_A = share_cross_modal_transformer_layers
_A = hidden_act
_A = hidden_size
_A = initializer_factor
_A = layer_norm_eps
_A = share_link_tower_layers
_A = link_tower_type
_A = num_attention_heads
_A = num_hidden_layers
_A = tie_word_embeddings
_A = init_layernorm_from_vision_encoder
if text_config is None:
_A = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_A = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_A = BridgeTowerTextConfig(**lowerCAmelCase_ )
_A = BridgeTowerVisionConfig(**lowerCAmelCase_ )
@classmethod
def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output
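# Composition sketch for the three configs above (BridgeTowerConfig as the name
# of this outer class is an assumption; the two sub-config classes are the ones
# instantiated at the end of __init__):
#   cfg = BridgeTowerConfig(
#       text_config=BridgeTowerTextConfig().to_dict(),
#       vision_config=BridgeTowerVisionConfig().to_dict(),
#   )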
from torch import nn
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a ) -> Tuple:
super().__init__()
lowerCAmelCase_ = class_size
lowerCAmelCase_ = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase_ = nn.Linear(_a , _a )
def __a ( self , _a ) -> Tuple:
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCAmelCase_ = self.mlp(_a )
return logits
def A(__a: Optional[Any] ):
lowerCAmelCase_ = len(__a )
lowerCAmelCase_ = sum(__a )
lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowerCAmelCase_ = True
for i in range(1 , s + 1 ):
lowerCAmelCase_ = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowerCAmelCase_ = dp[i][j - 1]
if arr[i - 1] <= j:
lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowerCAmelCase_ = s - 2 * j
break
return diff
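# Worked instance of the DP above: for arr = [1, 6, 11, 5] the total is 23, the
# largest reachable subset sum not exceeding 23 // 2 is 11 ({6, 5}), so the
# minimum partition difference returned is 23 - 2 * 11 = 1.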
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = '''ibert'''
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : Optional[int]=3_0_7_2 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-12 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : Optional[Any]="absolute" , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Tuple="none" , **lowerCAmelCase__ : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_vocab_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : Optional[int] = position_embedding_type
_UpperCAmelCase : Any = quant_mode
_UpperCAmelCase : Optional[Any] = force_dequant
class A__ ( UpperCamelCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__a = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__a = '▁'
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : List[str] = BarthezTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : str="<s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Dict="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Any="<unk>" , lowerCAmelCase__ : Any="<pad>" , lowerCAmelCase__ : List[str]="<mask>" , **lowerCAmelCase__ : Dict , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : Any = vocab_file
_UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
_UpperCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
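# Layout produced above, spelled out: a single sequence becomes <s> A </s>, and
# a sequence pair is joined with a double separator, <s> A </s></s> B </s>, the
# RoBERTa-style pair encoding.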
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_UpperCAmelCase : Any = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
a__ : List[Any] = False
a__ : Union[str, Any] = False
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return TrainCommand(lowerCAmelCase_ )
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( UpperCAmelCase__ : ArgumentParser ) -> List[str]:
__SCREAMING_SNAKE_CASE = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=UpperCAmelCase__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=UpperCAmelCase__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=UpperCAmelCase__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=UpperCAmelCase__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=UpperCAmelCase__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=UpperCAmelCase__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=UpperCAmelCase__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=UpperCAmelCase__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=UpperCAmelCase__ , default=3_2 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=UpperCAmelCase__ , default=6_4 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=UpperCAmelCase__ , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=UpperCAmelCase__ , default=1E-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Namespace ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = logging.get_logger("transformers-cli/training" )
__SCREAMING_SNAKE_CASE = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = args.output
__SCREAMING_SNAKE_CASE = args.column_label
__SCREAMING_SNAKE_CASE = args.column_text
__SCREAMING_SNAKE_CASE = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
__SCREAMING_SNAKE_CASE = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
__SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__SCREAMING_SNAKE_CASE = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
__SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__SCREAMING_SNAKE_CASE = args.validation_split
__SCREAMING_SNAKE_CASE = args.train_batch_size
__SCREAMING_SNAKE_CASE = args.valid_batch_size
__SCREAMING_SNAKE_CASE = args.learning_rate
__SCREAMING_SNAKE_CASE = args.adam_epsilon
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
raise NotImplementedError
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 195 |
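# A minimal usage sketch (not from the original file; assumes the module above
# is importable and `transformers` is installed) showing how the CLI entry
# point wires the subcommand into argparse. "train.csv" is a placeholder path.
from argparse import ArgumentParser

cli_parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
commands = cli_parser.add_subparsers(help="transformers-cli command helpers")
TrainCommand.register_subcommand(commands)
cli_args = cli_parser.parse_args(["train", "--train_data", "train.csv"])
print(cli_args.task, cli_args.train_batch_size)  # text_classification 32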
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowerCAmelCase_ ):
print(f"""{i}\t\t{d}""" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
for j in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [float("inf" )] * vertex_count
__SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
__SCREAMING_SNAKE_CASE = distance[u] + w
__SCREAMING_SNAKE_CASE = check_negative_cycle(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Union[str, Any] = int(input('''Enter number of vertices: ''').strip())
a__ : Any = int(input('''Enter number of edges: ''').strip())
a__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
a__ , a__ , a__ : str = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
a__ : str = {'''src''': src, '''dst''': dest, '''weight''': weight}
a__ : str = int(input('''\nEnter shortest path source:''').strip())
a__ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 195 | 1 |
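# A standalone non-interactive sketch (not from the original file) of the
# functions above: a 3-vertex graph with one negative edge but no negative cycle.
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
print(bellman_ford(edges, vertex_count=3, edge_count=3, src=0))  # [0.0, 4.0, 1.0]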
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for ShapEImg2ImgPipeline.

    Args:
        images (`torch.FloatTensor`):
            A list of images for 3D rendering.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 287 |
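# A standalone sketch (not from the original file) of the classifier-free
# guidance arithmetic used in the denoising loop above: guidance_scale > 1.0
# pushes the prediction away from the unconditional branch.
import torch

noise_pred_uncond = torch.zeros(1, 4)
noise_pred_cond = torch.ones(1, 4)
guidance_scale = 3.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided)  # tensor([[3., 3., 3., 3.]])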
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    This wraps the huggingface CLIP processor to allow backprop through the image processing step.
    The original processor forces conversion to PIL images, which breaks gradient flow.
    We call the processor for the text, but preprocess the images ourselves.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """
        Instantiate a VQGAN_CLIP model. If you want to use a custom VQGAN model, pass it as vqgan.
        """
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and return the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent from an image (if given), then optimize it against the CLIP prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 201 | 0 |
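# A standalone sketch (not from the original file) of the "prompt:weight"
# syntax accepted by process_prompts above; a bare prompt defaults to 1.0.
raw = "smiling face:1.5|blurry:-1|portrait"
pairs = []
for prompt in (p.strip() for p in raw.split("|")):
    if ":" in prompt:
        text, weight = prompt.split(":")
        pairs.append((text, float(weight)))
    else:
        pairs.append((prompt, 1.0))
print(pairs)  # [('smiling face', 1.5), ('blurry', -1.0), ('portrait', 1.0)]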
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 345 |
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` from the point `a` onwards by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of the natural logarithm (the answer is e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 345 | 1 |
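# A standalone worked example (not from the original file): one hand-rolled
# Newton step for f(x) = x**2 - 5 starting at x0 = 2 gives
# x1 = x0 - f(x0)/f'(x0) = 2 - (4 - 5)/4 = 2.25, already near sqrt(5) ~ 2.2360679.
x0 = 2.0
x1 = x0 - (x0**2 - 5) / (2 * x0)
print(x1)  # 2.25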
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """
        Serializes this instance to a Python dictionary.
        """
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
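# A minimal standalone sketch (not from the original file; requires
# `transformers` installed): DetrConfig fields round-trip through to_dict,
# and attribute_map aliases hidden_size to d_model as shown above.
from transformers import DetrConfig

detr_config = DetrConfig(num_queries=50, d_model=128)
assert detr_config.to_dict()["num_queries"] == 50
assert detr_config.hidden_size == 128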
| 23 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the Gregorian Easter date for a given year.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 118 | 0 |
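# A standalone sanity check (not from the original file): under the Gregorian
# calendar, Easter 2000 fell on April 23, which the function above reproduces.
from datetime import datetime

assert gauss_easter(2000) == datetime(2000, 4, 23)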
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
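# A quick standalone check (not part of the original script) of rename_key on
# a hypothetical checkpoint key; tracing the replacement rules above gives:
print(rename_key("conv_1.block.conv.weight"))  # mobilevit.conv_stem.convolution.weight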
| 358 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 50 | 0 |
def solution():
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 274 |
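# A standalone cross-check (not from the original file) using the standard
# library: count first-of-month Sundays from 1901 through 2000.
from datetime import date

count = sum(1 for y in range(1901, 2001) for m in range(1, 13) if date(y, m, 1).weekday() == 6)
print(count)  # 171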
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
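
# Worked example of the "SAME" padding arithmetic above for one spatial axis
# (a sketch with made-up sizes): in_size=7, stride=2, kernel=3 gives
# pad_along = max(3 - (7 % 2), 0) = 2, split left/right as (1, 1), matching
# TensorFlow's convention of putting any extra pixel on the bottom/right.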
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( UpperCAmelCase_ ):
"""simple docstring"""
    def __init__( self , config : MobileNetVaConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True)
        self.classifier = nn.Linear(last_hidden_size , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    loss = loss_fct(logits , labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
| 13 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        # Only the main local process should render the bar; everyone else disables it.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
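# Illustrative usage (a sketch; note that with this signature the first
# positional argument is `main_process_only`, so the iterable comes second):
#
#   for batch in tqdm(True, range(100), desc="training"):
#       ...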
| 350 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ):
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
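# Worked example (assumed, textbook-style values for a silicon p-n junction):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
# evaluates V_bi = (k*T/q) * ln(N_d * N_a / n_i**2)
#            ≈ 0.0259 * ln(1e14) ≈ 0.833 (volts)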
| 119 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"""do_clean_text""": False, """add_prefix_space""": False}
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab_tokens = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
        # fmt: on
        emoji_tokens = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}}  # 😀
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.emoji_file , '''w''' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ) -> GPTSanJapaneseTokenizer:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        output_text = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、世界。 こんばんは、㔺界。'''
        expected_token = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_token_bagging( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
        expected_text = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
    @slow
    def test_prefix_input_token( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        expected_text = '''こんにちは、世界。こんばんは、世界。😀'''
        tokens_1 = tokenizer.encode(prefix_text + input_text )
        tokens_2 = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
        tokens_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        output_text_1 = tokenizer.decode(tokens_1 )
        output_text_2 = tokenizer.decode(tokens_2 )
        output_text_3 = tokenizer.decode(tokens_3 )
        self.assertEqual(output_text_1 , expected_text )
        self.assertEqual(output_text_2 , expected_text )
        self.assertEqual(output_text_3 , expected_text )
    @slow
    def test_token_type_ids( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_2 = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_1 , expected_mask_1 )
        self.assertListEqual(type_ids_2 , expected_mask_2 )
        self.assertListEqual(type_ids_3 , expected_mask_3 )
    @slow
    def test_prefix_tokens( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        x_token_1 = tokenizer.encode('''あンいワ''' )
        x_token_2 = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
        x_token_3 = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_3[1] , x_token_3[3] )  # SEG token
    @slow
    def test_batch_encode( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        input_pairs = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_2.attention_mask , expected_attmask )
    def test_conversion_reversible( self ):
        '''simple docstring'''
        pass
    def test_padding_different_model_input_name( self ):
        '''simple docstring'''
        pass
| 319 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list , b: list ) -> list:
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('''Matrices are not 2x2''' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list , matrix_b: list ):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a: list , matrix_b: list ):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a: list ) -> tuple[list, list, list, list]:
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix: list ) -> None:
    print('''\n'''.join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list , matrix_b: list ) -> list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a, b, c, d = split_matrix(matrix_a )
    e, f, g, h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(top_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
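# Why seven recursive calls: Strassen trades one multiplication for extra
# additions, giving the recurrence T(n) = 7*T(n/2) + O(n^2), which solves to
# O(n^log2(7)) ≈ O(n^2.807) instead of the naive O(n^3).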
def strassen(matrix1: list , matrix2: list ) -> list:
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 319 | 1 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int] , costs: list[int] ) -> int:
    """simple docstring"""
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
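# Worked example (the classic "minimum cost for tickets" setup, used here only
# as an illustration): with costs = [2, 7, 15] for 1-, 7- and 30-day passes,
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11
# a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass on
# day 20 give 2 + 7 + 2 = 11.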
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 360 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config , items ):
    """simple docstring"""
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )
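# With the marker above in place, the suite can be split from the command line
# (standard pytest marker selection, shown here as an illustration):
#   pytest -m unit          # only tests marked (or defaulted to) "unit"
#   pytest -m integration   # only tests explicitly marked "integration"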
def pytest_configure(config ):
    """simple docstring"""
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ):
    """simple docstring"""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="""session""" )
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ):
    """simple docstring"""
    # don't take tests into account when counting downloads
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ):
    """simple docstring"""
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True )
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    '''Reshape a row Numpy array into a column Numpy array.'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''Compute the covariance matrix inside each class.'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''Compute the covariance matrix between multiple classes.'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    '''Project the dataset onto its first principal components.'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    '''Project the dataset onto the most discriminant directions.'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
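# Data layout note: both routines above assume `features` has shape
# (n_features, n_samples), i.e. observations are columns. That is why means are
# taken along axis 1 and covariances are divided by features.shape[1].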
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
def counting_sort(collection: list ) -> list:
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string: str ) -> str:
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
| 295 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_vit"""] = ["""ViTFeatureExtractor"""]
    _import_structure["""image_processing_vit"""] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
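    # Lazy-import note: with this pattern, `import transformers.models.vit` stays
    # cheap; the torch/TF/Flax modeling files listed in `_import_structure` are
    # only imported when an attribute such as `ViTModel` is first accessed.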
| 360 |
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 265 | 0 |
"""simple docstring"""
def merge_sort(collection: list ) -> list:
    def merge(left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
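# How the pieces fit (a quick check): the list is halved until single elements
# remain, then `merge` zips sorted halves back together, O(n log n) overall.
#   merge_sort([0, 5, 3, 2, 2])  # -> [0, 2, 2, 3, 5]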
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*merge_sort(unsorted), sep=""",""")
| 96 |
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arvaa( self ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , a=0.08 , b=1.22 , x=0.03 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        return self.nir - self.green
    def evi( self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        return self.nir / self.red
    def mrvi( self ):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        return self.nir / self.red
    def tvi( self ):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 96 | 1 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class , attributes , default_value , source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"""config.{attribute}""" in modeling_source
                or f"""getattr(config, \"{attribute}\"""" in modeling_source
                or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        """bos_index""",
        """eos_index""",
        """pad_index""",
        """unk_index""",
        """mask_index""",
        """image_size""",
        """use_cache""",
        """out_features""",
        """out_indices""",
    ]
    attributes_used_in_generation = ["""encoder_no_repeat_ngram_size"""]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
        for name, attributes in configs_with_unused_attributes.items():
            error += f"""{name}: {attributes}\n"""
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 366 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module ):
    """simple docstring"""
    def __init__( self , args ):
        """simple docstring"""
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , x ):
        """simple docstring"""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset ):
    """simple docstring"""
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : int ):
"""simple docstring"""
return len(self.data )
    def __getitem__( self , index ):
        """simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies( self ):
        """simple docstring"""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs
def collate_fn(batch ):
    lens = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
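# Typical wiring (a sketch; `dataset` stands in for a JsonlDataset instance):
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=32, collate_fn=collate_fn)
# Each batch then yields padded text ids, their mask, stacked images, the image
# boundary tokens, and the multi-hot label tensor.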
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
        ] )
| 111 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int ):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""" )
    print(f"""The numpy value of pi is {pi}""" )
    print(f"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(
    iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ):
    def identity_function(x: float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {expected_value}""" )
    print(f"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def pi_estimator_using_area_under_curve(iterations: int ):
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {pi}""" )
    print(f"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation" , init=False , repr=False )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """simple docstring"""
        from .features import Value
        return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages" , init=False , repr=False )
    def __post_init__( self ):
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        """simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """simple docstring"""
        from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
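# Standalone sketch of the flattening step performed by the second feature class
# above (the helper name below is mine; the class and its method are name-mangled):
# each language maps to one or more translations, and the output is a pair of
# parallel lists sorted by language code.
def flatten_translations_sketch(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}
# flatten_translations_sketch({"en": "the cat", "fr": ["le chat", "la chatte"]})
# -> {"language": ["en", "fr", "fr"], "translation": ["the cat", "la chatte", "le chat"]}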
| 16 | 0 |
from __future__ import annotations
__UpperCAmelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__UpperCAmelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _snake_case ( A ) -> list[float]:
lowerCAmelCase__ = []
lowerCAmelCase__ = len(A )
for i in range(A ):
lowerCAmelCase__ = -1
for j in range(i + 1 , A ):
if arr[i] < arr[j]:
lowerCAmelCase__ = arr[j]
break
result.append(A )
return result
def _snake_case ( A ) -> list[float]:
lowerCAmelCase__ = []
for i, outer in enumerate(A ):
lowerCAmelCase__ = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCAmelCase__ = inner
break
result.append(A )
return result
def _snake_case ( A ) -> list[float]:
lowerCAmelCase__ = len(A )
lowerCAmelCase__ = []
lowerCAmelCase__ = [-1] * arr_size
for index in reversed(range(A ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCAmelCase__ = stack[-1]
stack.append(arr[index] )
return result
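# Worked trace of the stack-based O(n) pass above on my own example
# [2, 7, 3, 5, 4] (the timing arrays at the top are separate test data):
#   index 4 (4): stack empty           -> result[4] = -1; stack = [4]
#   index 3 (5): pop 4 (4 <= 5), empty -> result[3] = -1; stack = [5]
#   index 2 (3): top 5 > 3             -> result[2] =  5; stack = [5, 3]
#   index 1 (7): pop 3, pop 5, empty   -> result[1] = -1; stack = [7]
#   index 0 (2): top 7 > 2             -> result[0] =  7; stack = [7, 2]
# Each element is pushed and popped at most once, hence the linear total cost.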
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__UpperCAmelCase = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 361 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 6_55_36 , lowerCamelCase_ = None , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , lowerCamelCase_ = 0 , lowerCamelCase_ = "fourier" , lowerCamelCase_ = True , lowerCamelCase_ = False , lowerCamelCase_ = 0.0 , lowerCamelCase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase_ = "UNetMidBlock1D" , lowerCamelCase_ = None , lowerCamelCase_ = (32, 32, 64) , lowerCamelCase_ = None , lowerCamelCase_ = 8 , lowerCamelCase_ = 1 , lowerCamelCase_ = False , ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase__ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase_ , log=lowerCamelCase_ , flip_sin_to_cos=lowerCamelCase_ )
lowerCAmelCase__ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase__ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase_ , downscale_freq_shift=lowerCamelCase_ )
lowerCAmelCase__ = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase__ = block_out_channels[0] * 4
lowerCAmelCase__ = TimestepEmbedding(
in_channels=lowerCamelCase_ , time_embed_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ , out_dim=block_out_channels[0] , )
lowerCAmelCase__ = nn.ModuleList([] )
lowerCAmelCase__ = None
lowerCAmelCase__ = nn.ModuleList([] )
lowerCAmelCase__ = None
# down
lowerCAmelCase__ = in_channels
for i, down_block_type in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase__ = i == len(lowerCamelCase_ ) - 1
lowerCAmelCase__ = get_down_block(
lowerCamelCase_ , num_layers=lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase_ )
# mid
lowerCAmelCase__ = get_mid_block(
lowerCamelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase_ , add_downsample=lowerCamelCase_ , )
# up
lowerCAmelCase__ = list(reversed(lowerCamelCase_ ) )
lowerCAmelCase__ = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase__ = out_channels
else:
lowerCAmelCase__ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase_ ) - 1 else final_upsample_channels
)
lowerCAmelCase__ = i == len(lowerCamelCase_ ) - 1
lowerCAmelCase__ = get_up_block(
lowerCamelCase_ , num_layers=lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase_ )
lowerCAmelCase__ = output_channel
# out
lowerCAmelCase__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase__ = get_out_block(
out_block_type=lowerCamelCase_ , num_groups_out=lowerCamelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase_ , act_fn=lowerCamelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCAmelCase__ = timestep
if not torch.is_tensor(lowerCamelCase_ ):
lowerCAmelCase__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ = timesteps[None].to(sample.device )
lowerCAmelCase__ = self.time_proj(lowerCamelCase_ )
if self.config.use_timestep_embedding:
lowerCAmelCase__ = self.time_mlp(lowerCamelCase_ )
else:
lowerCAmelCase__ = timestep_embed[..., None]
lowerCAmelCase__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
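            # Shape note (descriptive comment; concrete sizes depend on the config):
            # in this branch the projected time embedding (batch, channels) gains a
            # trailing axis, is repeated to the sample's length, and is broadcast to
            # (batch, channels, length) so that blocks like DownBlock1DNoSkip can
            # concatenate it with the sample's channels (this is also what the
            # `extra_in_channels` added to the first down block accounts for).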
# 2. down
lowerCAmelCase__ = ()
for downsample_block in self.down_blocks:
lowerCAmelCase__ , lowerCAmelCase__ = downsample_block(hidden_states=lowerCamelCase_ , temb=lowerCamelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase__ = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase__ = down_block_res_samples[-1:]
lowerCAmelCase__ = down_block_res_samples[:-1]
lowerCAmelCase__ = upsample_block(lowerCamelCase_ , res_hidden_states_tuple=lowerCamelCase_ , temb=lowerCamelCase_ )
# 5. post-process
if self.out_block:
lowerCAmelCase__ = self.out_block(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase_ ) | 228 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,)
assert hasattr(self ,"""env""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
# create estimator
lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
| 37 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
UpperCAmelCase__ : str = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ : Union[str, Any] = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase__ )
assert base_extractor.is_extractable(UpperCamelCase__ )
UpperCAmelCase__ : int = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
UpperCAmelCase__ : str = output_path.read_text(encoding="""utf-8""" )
UpperCAmelCase__ : Union[str, Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
UpperCAmelCase__ : Dict = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
UpperCAmelCase__ : List[str] = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase__ )
UpperCAmelCase__ : Dict = Extractor.infer_extractor_format(UpperCamelCase__ )
assert extractor_format is not None
UpperCAmelCase__ : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Dict = file_path.read_text(encoding="""utf-8""" )
else:
UpperCAmelCase__ : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
UpperCAmelCase__ : str = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
import tarfile
UpperCAmelCase__ : Optional[int] = tmp_path / """data_dot_dot"""
directory.mkdir()
UpperCAmelCase__ : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(UpperCamelCase__ , """w""" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def _UpperCamelCase ( UpperCamelCase__ ):
import tarfile
UpperCAmelCase__ : List[str] = tmp_path / """data_sym_link"""
directory.mkdir()
UpperCAmelCase__ : Optional[int] = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=UpperCamelCase__ )
with tarfile.TarFile(UpperCamelCase__ , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
UpperCAmelCase__ : str = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ : Union[str, Any] = tmp_path / """extracted"""
TarExtractor.extract(UpperCamelCase__ , UpperCamelCase__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _UpperCamelCase ( UpperCamelCase__ ):
    # We should have fewer false positives than zipfile.is_zipfile:
    # we do that by checking only the magic number.
UpperCAmelCase__ : Tuple = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ : Any = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(UpperCamelCase__ )
assert zipfile.is_zipfile(str(UpperCamelCase__ ) ) # is a false positive for `zipfile`
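    # Minimal sketch (my illustration, not the library's exact implementation) of
    # the magic-number check that `ZipExtractor.is_extractable` is assumed to
    # perform -- compare the *leading* bytes against known ZIP signatures, so a
    # PNG that merely contains b"PK\x05\x06" in its payload is rejected:
    #   def looks_like_zip(path):
    #       with open(path, "rb") as f:
    #           start = f.read(4)
    #       return any(start.startswith(sig) for sig in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"))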
assert not ZipExtractor.is_extractable(UpperCamelCase__ ) # but we're right | 163 | 0 |
"""simple docstring"""
_lowercase : dict[tuple[int, int, int], int] = {}
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCamelCase__ : Optional[Any] =(days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCamelCase__ : Any =_calculate(days - 1 , __lowerCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCamelCase__ : Optional[Any] =_calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCamelCase__ : Dict =_calculate(days - 1 , __lowerCamelCase , 0 )
lowerCamelCase__ : List[str] =state_late + state_absent + state_ontime
lowerCamelCase__ : Union[str, Any] =prizestrings
return prizestrings
def snake_case__ ( __lowerCamelCase : int = 30 ):
"""simple docstring"""
return _calculate(__lowerCamelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
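# Sanity check, derivable from the attendance rules above: over a 4-day period
# there are 3**4 = 81 trinary strings and exactly 43 of them are prize strings
# (no second absence, no run of three lates), so, for example:
#   print(solution(4))  # -> 43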
| 272 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[Any], lowerCamelCase : Dict="", lowerCamelCase : Tuple="train" )-> Dict:
assert os.path.isdir(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : Dict =os.listdir(lowerCamelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCamelCase__ : Optional[int] =os.path.join(lowerCamelCase, lowerCamelCase )
if not os.path.isfile(lowerCamelCase ):
continue
self.documents.append(lowerCamelCase )
def __len__( self : Optional[Any] )-> List[str]:
return len(self.documents )
def __getitem__( self : List[str], lowerCamelCase : Dict )-> str:
lowerCamelCase__ : int =self.documents[idx]
lowerCamelCase__ : List[Any] =document_path.split('''/''' )[-1]
with open(lowerCamelCase, encoding='''utf-8''' ) as source:
lowerCamelCase__ : Optional[int] =source.read()
lowerCamelCase__ , lowerCamelCase__ : List[Any] =process_story(lowerCamelCase )
return document_name, story_lines, summary_lines
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[str] =list(filter(lambda __lowerCamelCase : len(__lowerCamelCase ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCamelCase__ : Dict =[_add_missing_period(__lowerCamelCase ) for line in nonempty_lines]
# gather article lines
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : Optional[Any] =deque(__lowerCamelCase )
while True:
try:
lowerCamelCase__ : Tuple =lines.popleft()
if element.startswith('''@highlight''' ):
break
story_lines.append(__lowerCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCamelCase__ : Dict =list(filter(lambda __lowerCamelCase : not t.startswith('''@highlight''' ) , __lowerCamelCase ) )
return story_lines, summary_lines
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
    lowerCamelCase__ : Any =['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if len(__lowerCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__lowerCamelCase )) )
return sequence
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : int =torch.ones_like(__lowerCamelCase )
lowerCamelCase__ : Any =sequence == pad_token_id
lowerCamelCase__ : List[str] =0
return mask
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =[tokenizer.encode(__lowerCamelCase ) for line in story_lines]
lowerCamelCase__ : List[Any] =[token for sentence in story_lines_token_ids for token in sentence]
lowerCamelCase__ : List[Any] =[tokenizer.encode(__lowerCamelCase ) for line in summary_lines]
lowerCamelCase__ : Optional[int] =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Any =[]
for sequence in batch:
lowerCamelCase__ : Optional[int] =-1
lowerCamelCase__ : List[str] =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__lowerCamelCase )
return torch.tensor(__lowerCamelCase )
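# Hedged usage sketch of the story parser above (the defs are name-mangled here,
# but the call sites use `process_story`): CNN/DailyMail files place summary
# sentences after "@highlight" markers.
#   raw = "First article line.\nSecond line\n@highlight\nThe summary."
#   story, summary = process_story(raw)
#   story   -> ["First article line.", "Second line."]   # missing period added
#   summary -> ["The summary."]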
| 272 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
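# Descriptive sketch (mine, not the library's code) of what the _LazyModule
# registered above does: defer the heavy framework imports until attribute access.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, name_to_submodule):
        super().__init__(name)
        # mapping like the _import_structure above: public name -> submodule
        self._name_to_submodule = name_to_submodule
    def __getattr__(self, attr):
        # import the owning submodule lazily, then pull the requested symbol
        module = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(module, attr)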
| 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
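    # Hedged usage sketch (the checkpoint name is an assumption; any
    # image-to-text checkpoint works):
    #   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    #   captioner("https://example.com/cat.png")
    #   # -> [{"generated_text": "a cat sitting on a couch"}]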
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
        # The Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode,
        # the pipeline groups these into a list of `None` values, which fails `_forward`. Avoid this by checking first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
return records | 331 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=1_000 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
__a = range_bbox
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a = bbox[i, j, 3]
__a = bbox[i, j, 1]
__a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__a = bbox[i, j, 2]
__a = bbox[i, j, 0]
__a = t
__a = tf.convert_to_tensor(a__ )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMModel(config=a__ )
__a = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
__a = model(a__ , a__ , token_type_ids=a__ )
__a = model(a__ , a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMForMaskedLM(config=a__ )
__a = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFLayoutLMForSequenceClassification(config=a__ )
__a = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFLayoutLMForTokenClassification(config=a__ )
__a = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a ):
__a = TFLayoutLMForQuestionAnswering(config=a__ )
__a = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : List[Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : int = True
__UpperCAmelCase : List[str] = 1_0
def __UpperCAmelCase ( self ):
__a = TFLayoutLMModelTester(self )
__a = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFLayoutLMModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def __UpperCAmelCase ( self ):
pass
def lowercase ( ) -> Optional[int]:
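    # fmt: off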
__a = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
__a = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__a = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__a = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__a = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
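# Note (from the LayoutLM convention rather than this file): each bbox above is
# [x0, y0, x1, y1] on a 0-1000 normalized page grid, which is also why the
# legality loop in the model tester swaps coordinates until x0 <= x1 and
# y0 <= y1.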
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the sequence output on [0, :3, :3]
__a = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1E-3 ) )
# test the pooled output on [1, :3]
__a = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self ):
__a = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__a = outputs.loss
__a = (2,)
self.assertEqual(loss.shape , a__ )
# test the shape of the logits
__a = outputs.logits
__a = (2, 2)
self.assertEqual(logits.shape , a__ )
@slow
def __UpperCAmelCase ( self ):
__a = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
# test the shape of the logits
__a = outputs.logits
__a = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , a__ )
@slow
def __UpperCAmelCase ( self ):
__a = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__a , __a , __a , __a , __a = prepare_layoutlm_batch_inputs()
# forward pass
__a = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the shape of the logits
__a = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , a__ )
self.assertEqual(outputs.end_logits.shape , a__ )
| 353 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( A__ ):
A__ = 'time_series_transformer'
A__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[int] , _a : Optional[int] = None , _a : Optional[int] = None , _a : str = "student_t" , _a : str = "nll" , _a : int = 1 , _a : List[int] = [1, 2, 3, 4, 5, 6, 7] , _a : Optional[Union[str, bool]] = "mean" , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : Optional[List[int]] = None , _a : Optional[List[int]] = None , _a : int = 32 , _a : int = 32 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : bool = True , _a : str = "gelu" , _a : int = 64 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : int = 100 , _a : float = 0.02 , _a : Union[str, Any]=True , **_a : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =prediction_length
_SCREAMING_SNAKE_CASE =context_length or prediction_length
_SCREAMING_SNAKE_CASE =distribution_output
_SCREAMING_SNAKE_CASE =loss
_SCREAMING_SNAKE_CASE =input_size
_SCREAMING_SNAKE_CASE =num_time_features
_SCREAMING_SNAKE_CASE =lags_sequence
_SCREAMING_SNAKE_CASE =scaling
_SCREAMING_SNAKE_CASE =num_dynamic_real_features
_SCREAMING_SNAKE_CASE =num_static_real_features
_SCREAMING_SNAKE_CASE =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =cardinality
else:
_SCREAMING_SNAKE_CASE =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =embedding_dimension
else:
_SCREAMING_SNAKE_CASE =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_SCREAMING_SNAKE_CASE =num_parallel_samples
# Transformer architecture configuration
_SCREAMING_SNAKE_CASE =input_size * len(_a ) + self._number_of_features
_SCREAMING_SNAKE_CASE =d_model
_SCREAMING_SNAKE_CASE =encoder_attention_heads
_SCREAMING_SNAKE_CASE =decoder_attention_heads
_SCREAMING_SNAKE_CASE =encoder_ffn_dim
_SCREAMING_SNAKE_CASE =decoder_ffn_dim
_SCREAMING_SNAKE_CASE =encoder_layers
_SCREAMING_SNAKE_CASE =decoder_layers
_SCREAMING_SNAKE_CASE =dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =encoder_layerdrop
_SCREAMING_SNAKE_CASE =decoder_layerdrop
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =init_std
_SCREAMING_SNAKE_CASE =use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def A ( self : List[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
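# Worked example (all values assumed): with embedding_dimension=[2, 3],
# num_dynamic_real_features=1, num_time_features=2, num_static_real_features=1
# and input_size=1, the property above gives (2 + 3) + 1 + 2 + 1 + 1 * 2 = 11,
# and the transformer's feature size becomes input_size * len(lags_sequence) + 11.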
| 47 |
'''simple docstring'''
lowerCamelCase : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase : int = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 47 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__A : Optional[Any] = logging.getLogger()
def __UpperCamelCase ( _A : Path , _A : list ) ->Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ ="""\n""".join(_A )
Path(_A ).open("""w""" ).writelines(_A )
__A : List[str] = 'patrickvonplaten/t5-tiny-random'
__A : List[Any] = 'sshleifer/bart-tiny-random'
__A : List[str] = 'sshleifer/tiny-mbart'
__A : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCamelCase_ =input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCamelCase_ =[""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
lowerCamelCase_ ="""translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCamelCase_ =f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
run_generate()
assert Path(_SCREAMING_SNAKE_CASE ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self )-> List[Any]:
self.run_eval_tester(_SCREAMING_SNAKE_CASE )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
self.run_eval_tester(_SCREAMING_SNAKE_CASE )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCamelCase_ =input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCamelCase_ ={
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() )
lowerCamelCase_ =str(tmp_dir / """scores.json""" )
lowerCamelCase_ =str(tmp_dir / """val.target""" )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""en"""] )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""de"""] )
lowerCamelCase_ ="""translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCamelCase_ =f'\n run_eval_search.py\n {model}\n {str(_SCREAMING_SNAKE_CASE )}\n {str(_SCREAMING_SNAKE_CASE )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
with CaptureStdout() as cs:
run_search()
lowerCamelCase_ =[""" num_beams | length_penalty""", model, """Best score args"""]
lowerCamelCase_ =["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(_SCREAMING_SNAKE_CASE )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_SCREAMING_SNAKE_CASE ).exists()
os.remove(Path(_SCREAMING_SNAKE_CASE ) )
| 49 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A : Any = '▁'
__A : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = BertGenerationTokenizer
_UpperCamelCase:List[str] = False
_UpperCamelCase:List[Any] = True
def _snake_case ( self )-> Optional[int]:
super().setUp()
lowerCamelCase_ =BertGenerationTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self )-> Any:
lowerCamelCase_ ="""<s>"""
lowerCamelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1002 )
def _snake_case ( self )-> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
lowerCamelCase_ =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_8536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase_ ={"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 49 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
_snake_case = True
_snake_case = None
# Automatically constructed
_snake_case = "PIL.Image.Image"
_snake_case = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_snake_case = field(default='Image' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self )-> List[str]:
'''simple docstring'''
return self.pa_type
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"path": value, "bytes": None}
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"path": None, "bytes": value}
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(SCREAMING_SNAKE_CASE_ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> "PIL.Image.Image":
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
__UpperCamelCase = {}
__UpperCamelCase , __UpperCamelCase = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = PIL.Image.open(SCREAMING_SNAKE_CASE_ )
else:
__UpperCamelCase = path.split('''::''' )[-1]
try:
__UpperCamelCase = string_to_dict(SCREAMING_SNAKE_CASE_ , config.HUB_DATASETS_URL )['''repo_id''']
__UpperCamelCase = token_per_repo_id.get(SCREAMING_SNAKE_CASE_ )
except ValueError:
__UpperCamelCase = None
with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' , use_auth_token=SCREAMING_SNAKE_CASE_ ) as f:
__UpperCamelCase = BytesIO(f.read() )
__UpperCamelCase = PIL.Image.open(bytes_ )
else:
__UpperCamelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def A__ ( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
__UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
__UpperCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__UpperCamelCase = storage.field('''bytes''' )
else:
__UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__UpperCamelCase = storage.field('''path''' )
else:
__UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__UpperCamelCase = pa.array(
[encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
__UpperCamelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE_ ):
with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
__UpperCamelCase = f.read()
return bytes_
__UpperCamelCase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__UpperCamelCase = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
def A_ ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__UpperCamelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def A_ ( snake_case : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
__UpperCamelCase = BytesIO()
if image.format in list_image_compression_formats():
__UpperCamelCase = image.format
else:
__UpperCamelCase = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(snake_case , format=snake_case )
return buffer.getvalue()
def A_ ( snake_case : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(snake_case , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(snake_case )}
def A_ ( snake_case : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
__UpperCamelCase = array.dtype
__UpperCamelCase = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
__UpperCamelCase = dtype.kind
__UpperCamelCase = dtype.itemsize
__UpperCamelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__UpperCamelCase = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
__UpperCamelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__UpperCamelCase = dtype_byteorder + dtype_kind + str(snake_case )
__UpperCamelCase = np.dtype(snake_case )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}" )
__UpperCamelCase = PIL.Image.fromarray(array.astype(snake_case ) )
return {"path": None, "bytes": image_to_bytes(snake_case )}
def A_ ( snake_case : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
__UpperCamelCase , __UpperCamelCase = first_non_null_value(snake_case )
if isinstance(snake_case , snake_case ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(snake_case , np.ndarray ):
__UpperCamelCase = no_op_if_value_is_null(snake_case )
return [obj_to_image_dict_func(snake_case ) for obj in objs]
elif isinstance(snake_case , PIL.Image.Image ):
__UpperCamelCase = no_op_if_value_is_null(snake_case )
return [obj_to_image_dict_func(snake_case ) for obj in objs]
else:
return objs
else:
return objs
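# Minimal sanity-check sketch for the encode path above (assumes Pillow and
# numpy are installed; the array values are arbitrary illustration data).
if __name__ == "__main__":
    test_array = np.zeros((4, 4, 3), dtype=np.uint8)  # multi-channel -> stored as |u1
    encoded = encode_np_array(test_array)
    # PNG is chosen for RGB-compatible modes, so the payload starts with the PNG magic.
    assert encoded["path"] is None and encoded["bytes"].startswith(b"\x89PNG")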
| 328 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowercase__ : Any = parser.parse_args()
lowercase__ : Union[str, Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowercase__ : List[str] = CLIPImageProcessor()
lowercase__ : Optional[Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowercase__ : Optional[Any] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
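# Example invocation (a sketch; the script filename below is a placeholder for
# wherever this conversion script lives in your checkout):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha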
| 328 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = CycleDiffusionPipeline
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
SCREAMING_SNAKE_CASE_ : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
SCREAMING_SNAKE_CASE_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self ) -> str:
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , num_train_timesteps=1_000 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = image / 2 + 0.5
if str(lowerCAmelCase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCAmelCase__ , 'half' ):
SCREAMING_SNAKE_CASE = module.half()
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self ) -> Union[str, Any]:
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __A ( self ) -> List[Any]:
return super().test_inference_batch_single_identical()
@skip_mps
def __A ( self ) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self ) -> int:
return super().test_save_load_optional_components()
@skip_mps
def __A ( self ) -> Optional[int]:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder='scheduler' )
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(
lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A black colored car'
SCREAMING_SNAKE_CASE = 'A blue colored car'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , source_prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder='scheduler' )
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A black colored car'
SCREAMING_SNAKE_CASE = 'A blue colored car'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , source_prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCAmelCase__ (snake_case__ : Iterable[str] , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = iter(snake_case__ )
while True:
_snake_case : List[str] = tuple(itertools.islice(snake_case__ , snake_case__ ) )
if not chunk:
return
yield chunk
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Union[str, Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
_snake_case : List[str] = """"""
if len(snake_case__ ) < 2:
return dirty
for i in range(len(snake_case__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(snake_case__ ) & 1:
clean += "X"
return clean
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Dict = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_snake_case : List[Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(snake_case__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(snake_case__ )
return table
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[int] = generate_table(snake_case__ )
_snake_case : Tuple = prepare_input(snake_case__ )
_snake_case : int = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(snake_case__ , 2 ):
_snake_case , _snake_case : int = divmod(table.index(snake_case__ ) , 5 )
_snake_case , _snake_case : Dict = divmod(table.index(snake_case__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Union[str, Any] = generate_table(snake_case__ )
_snake_case : List[Any] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(snake_case__ , 2 ):
_snake_case , _snake_case : Optional[int] = divmod(table.index(snake_case__ ) , 5 )
_snake_case , _snake_case : Tuple = divmod(table.index(snake_case__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
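if __name__ == "__main__":
    # Round-trip sketch using the classic Wikipedia example. Note that
    # prepare_input() upcases, drops non-letters and pads repeated letters
    # with X's, so decode() returns a normalized form of the message.
    key = "playfair example"
    ciphertext = encode("Hide the gold in the tree stump", key)
    print(ciphertext)
    print(decode(ciphertext, key))  # HIDETHEGOLDINTHETREXESTUMP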
| 64 |
"""simple docstring"""
from math import ceil
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = list(range(0 , snake_case__ ) )
SCREAMING_SNAKE_CASE__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
SCREAMING_SNAKE_CASE__ = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case__ )
# Missing blocks
SCREAMING_SNAKE_CASE__ = [i for i in blocks if i not in device_map_blocks]
SCREAMING_SNAKE_CASE__ = [i for i in device_map_blocks if i not in blocks]
if len(snake_case__ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(snake_case__ ) )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = list(range(snake_case__ ) )
SCREAMING_SNAKE_CASE__ = int(ceil(n_layers / len(snake_case__ ) ) )
SCREAMING_SNAKE_CASE__ = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )]
return dict(zip(snake_case__ , snake_case__ ) )
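if __name__ == "__main__":
    # Sketch: distribute 12 attention blocks evenly over 4 devices and validate.
    device_map = get_device_map(12, list(range(4)))
    print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
    assert_device_map(device_map, 12)  # raises ValueError on duplicates or gaps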
| 165 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : int = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
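# Usage sketch of the lazily-exported classes above (assumes `transformers` and
# PyTorch are installed; "alibaba-damo/mgp-str-base" is, to our knowledge, the
# published checkpoint id, shown here for illustration):
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")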
| 352 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 231 | 0 |
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 111 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 111 | 1 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicated initializers from an ONNX file and saves an optimized copy.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
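if __name__ == "__main__":
    # Sketch: deduplicate shared weights in an exported ONNX file.
    # "model.onnx" is a placeholder path, not a file shipped with this module.
    optimized_path = remove_dup_initializers("model.onnx")
    print("optimized model written to", optimized_path)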
| 359 |
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Each update below is the chain-rule gradient of the squared error
        # with respect to the corresponding weight matrix.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
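# Gradient sketch for the output-layer update in back_propagation() above,
# from the chain rule on the squared error L = (y - y_hat)^2:
#
#   dL/dW_out = a2^T . ( 2 * (y - y_hat) * sigmoid'(y_hat) )
#
# where a2 is the second hidden layer's activation and sigmoid'(y_hat) is
# sigmoid_derivative(self.predicted_output); the two hidden-layer updates
# push the same error term back through W_out and W_hidden respectively.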
| 41 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 115 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 115 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE : List[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_lowercase : Optional[Any] = 2
_lowercase : Optional[int] = inputs['input_ids'].shape[-1] // 2
_lowercase : str = inputs['input_ids'][:max_batch_size, :sequence_length]
_lowercase : List[str] = jnp.ones_like(lowerCamelCase)
_lowercase : List[Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_lowercase : Optional[int] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_lowercase : Tuple = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self._get_input_ids_and_config()
_lowercase : List[str] = False
_lowercase : Union[str, Any] = max_length
_lowercase : List[Any] = 0
for model_class in self.all_generative_model_classes:
_lowercase : Any = model_class(lowerCamelCase)
_lowercase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowercase : Any = getattr(lowerCamelCase, lowerCamelCase)
_lowercase : List[Any] = pt_model_class(lowerCamelCase).eval()
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, flax_model.params)
_lowercase : Tuple = flax_model.generate(lowerCamelCase).sequences
_lowercase : Optional[int] = pt_model.generate(torch.tensor(lowerCamelCase, dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowercase : Optional[int] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = self._get_input_ids_and_config()
_lowercase : Optional[int] = False
_lowercase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowercase : List[str] = model_class(lowerCamelCase)
_lowercase : Any = model.generate(lowerCamelCase).sequences
self.assertEqual(generation_outputs.shape[-1], lowerCamelCase)
_lowercase : Tuple = jit(model.generate)
_lowercase : Any = jit_generate(lowerCamelCase).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[Any] = self._get_input_ids_and_config()
_lowercase : Optional[int] = True
_lowercase : Tuple = max_length
for model_class in self.all_generative_model_classes:
_lowercase : str = model_class(lowerCamelCase)
_lowercase : int = model.generate(lowerCamelCase).sequences
self.assertEqual(generation_outputs.shape[-1], lowerCamelCase)
_lowercase : Optional[int] = jit(model.generate)
_lowercase : Optional[Any] = jit_generate(lowerCamelCase).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = self._get_input_ids_and_config()
_lowercase : Optional[Any] = False
_lowercase : Tuple = max_length
_lowercase : int = 2
for model_class in self.all_generative_model_classes:
_lowercase : List[str] = model_class(lowerCamelCase)
_lowercase : Dict = model.generate(lowerCamelCase).sequences
self.assertEqual(generation_outputs.shape[-1], lowerCamelCase)
_lowercase : Optional[int] = jit(model.generate)
_lowercase : Union[str, Any] = jit_generate(lowerCamelCase).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = self._get_input_ids_and_config()
_lowercase : Optional[int] = False
_lowercase : Union[str, Any] = max_length
_lowercase : Optional[Any] = 2
_lowercase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowercase : str = model_class(lowerCamelCase)
_lowercase : List[Any] = model.generate(lowerCamelCase).sequences
self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
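    # Hand-checked small cases (illustrative additions, not part of the original
    # script): n=3 contributes 2 * 3 * ((3 - 1) // 2) = 6, and n=4 then adds
    # 2 * 4 * ((4 - 1) // 2) = 8.
    assert solution(3) == 6
    assert solution(4) == 14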
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour upsampling by 2x, followed by a 3x3 convolution.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the timestep embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
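# A minimal usage sketch for the modules above (added for illustration; the
# shapes, PRNG seed, and `__main__` guard are assumptions, not part of the
# original module). Flax modules are initialized with `init` and run with `apply`:
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    sample = jnp.ones((1, 8, 8, 4))  # NHWC layout, matching the `__call__` implementations
    upsample = FlaxUpsample2D(out_channels=4)
    params = upsample.init(rng, sample)
    out = upsample.apply(params, sample)
    print(out.shape)  # expected (1, 16, 16, 4): spatial dims doubled by the resize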
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder that maps a prefix embedding into a GPT-2 language model head and
    generates text conditioned on it.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: expand the single hypothesis into `beam_size` beams.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Subsequent steps: score all beam continuations and keep the best ones,
                # normalizing by sequence length so finished beams are comparable.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
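# Construction sketch for the decoder above (illustrative only; the dimensions
# and the GPT-2 EOS id below are assumptions, not values taken from any
# released checkpoint):
#
#   decoder = UniDiffuserTextDecoder(
#       prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64
#   )
#   tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")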
"""simple docstring"""
def _A ( _a : int , _a : int ):
"""simple docstring"""
return abs(_a ) if a == 0 else greatest_common_divisor(b % a , _a )
def _A ( _a : int , _a : int ):
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
A , A = y, x % y
return abs(_a )
def _A ( ):
"""simple docstring"""
try:
A = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
A = int(nums[0] )
A = int(nums[1] )
print(
f'greatest_common_divisor({num_a}, {num_a}) = '
f'{greatest_common_divisor(_a , _a )}' )
print(f'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_a , _a )}' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
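# Hand-checked examples for the two implementations above (added for
# illustration; not part of the original module):
assert greatest_common_divisor(24, 40) == 8 == gcd_by_iterative(24, 40)
assert greatest_common_divisor(121, 11) == 11 == gcd_by_iterative(121, 11)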
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase =logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Any:
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCamelCase__ ( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ) -> int:
A = {}
A = {}
if prompt is not None:
A = prompt
if generate_kwargs is not None:
A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Any:
return super().__call__(lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=None ) -> Optional[Any]:
A = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
raise ValueError(
f'Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
A = self.model.config.model_type
if model_type == "git":
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
A = self.tokenizer(text=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ).input_ids
A = [self.tokenizer.cls_token_id] + input_ids
A = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
A = self.image_processor(images=lowerCamelCase_ ,header_text=lowerCamelCase_ ,return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
A = self.tokenizer(lowerCamelCase_ ,return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
A = None
return model_inputs
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=None ) -> Optional[int]:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] ,lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
A = None
if generate_kwargs is None:
A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
A = model_inputs.pop(self.model.main_input_name )
A = self.model.generate(lowerCamelCase_ ,**lowerCamelCase_ ,**lowerCamelCase_ )
return model_outputs
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
A = []
for output_ids in model_outputs:
A = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,)
}
records.append(lowerCamelCase_ )
return records
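# Typical end-user usage of this pipeline (a sketch; the checkpoint name and
# image URL are assumptions and any public captioning checkpoint would do):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{"generated_text": "..."}]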
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
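# Usage sketch for the task template above (illustrative; the label names are
# assumptions, not values from the original module):
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification(audio_column="audio", label_column="labels")
#   task = task.align_with_features(features)  # binds the concrete ClassLabel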
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
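# Worked example: with the default scale_factor=8, a 768x768 image maps to a
# 96x96 latent grid (768 // 64 = 12, then 12 * 8 = 96); sizes that are not a
# multiple of 64 are rounded up, e.g. downscale_height_and_width(765, 765) == (96, 96).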
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for image generation with ControlNet guidance using Kandinsky 2.2.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
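# Example invocation (a sketch; the script filename is an assumption):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub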
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
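# For the inputs above, the backtracking search finds exactly two subsets that
# sum to 9 -- [3, 4, 2] and [4, 5] -- so the script prints:
#   [3, 4, 2] [4, 5]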
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass decode kwargs to the streamer to control how the tokens are decoded. Must be tested
        # with an actual model -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
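# Standalone usage sketch outside the test suite (the checkpoint name and the
# prompt are illustrative, not taken from the tests above):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
#
#   tok = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   streamer = TextStreamer(tok)
#   model.generate(**tok("Hello", return_tensors="pt"), max_new_tokens=20, streamer=streamer)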
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch
lowercase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
lowerCAmelCase , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class __snake_case ( lowerCAmelCase ):
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.framework == "tf":
lowercase : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=snake_case )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.get_masked_index(snake_case )
lowercase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,f"No mask_token ({self.tokenizer.mask_token}) found on the input" ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,**snake_case ):
'''simple docstring'''
if return_tensors is None:
lowercase : int = self.framework
lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=snake_case )
self.ensure_exactly_one_mask_token(snake_case )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = self.model(**snake_case )
lowercase : Tuple = model_inputs["""input_ids"""]
return model_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=5 ,snake_case=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase : str = target_ids.shape[0]
lowercase : Optional[Any] = model_outputs["""input_ids"""][0]
lowercase : List[str] = model_outputs["""logits"""]
if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
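# Illustrative usage (added for clarity; not part of the original file). Assuming the
# class above is registered as the standard `fill-mask` task, a typical call looks like:
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker(f"Paris is the {unmasker.tokenizer.mask_token} of France.", top_k=2)
#
# Passing `targets=[...]` restricts the candidates via `get_target_ids` above, and a
# single-element list input is unwrapped by `__call__`.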
| 20 | 0 |
"""simple docstring"""
import math

# Problem setup (Project Euler 493, "Under The Rainbow"): an urn holds ten balls of each
# of seven colours; `solution(taken)` returns the expected number of distinct colours
# among `taken` randomly drawn balls, via the probability that a given colour is missing.

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Return the expected number of distinct colours, formatted to 9 decimal places."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20)) | 303 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins | 303 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128_100, hidden_size=1_536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6_144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
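# Illustrative usage (added for clarity; not part of the original file). A sketch of how
# the two classes above pair up, assuming the standard `transformers.onnx` export flow:
#
#     config = DebertaV2Config()
#     onnx_config = DebertaV2OnnxConfig(config, task="default")
#     list(onnx_config.inputs)  # ['input_ids', 'attention_mask'] since type_vocab_size == 0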
| 111 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
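# Note (added for clarity; not part of the original file): `quant_mode` switches I-BERT
# into integer-only inference, while `force_dequant` ("none" by default) lets individual
# nonlinear ops such as GELU, softmax, or LayerNorm fall back to full precision when
# debugging quantization error.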
| 111 | 1 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sort `sequence[start..end]` in place with the deliberately inefficient
    slowsort ("multiply and surrender") algorithm.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
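# Illustrative demo (added; not part of the original file). Slowsort has
# super-polynomial running time, so it is only practical on tiny inputs:
if __name__ == "__main__":
    demo = [9, 3, 7, 1, 8, 2]
    slowsort(demo)
    print(demo)  # [1, 2, 3, 7, 8, 9]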
| 364 |
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Liouville's lambda function: -1 if `number` has an odd count of prime factors
    (with multiplicity), +1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
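# Illustrative values (added; not part of the original file): prime_factors counts
# multiplicity, so 12 = 2 * 2 * 3 has three factors and lambda(12) == -1, while
# 10 = 2 * 5 has two and lambda(10) == 1.
if __name__ == "__main__":
    print(liouville_lambda(10))  # 1
    print(liouville_lambda(12))  # -1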
| 173 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , )-> Optional[Any]:
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def _snake_case ( self )-> Dict:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size] , 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self )-> List[str]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> str:
lowerCamelCase_ =FlaubertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> List[Any]:
lowerCamelCase_ =FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Optional[Any]:
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Optional[int]:
lowerCamelCase_ =FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((lowerCamelCase_) , ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((lowerCamelCase_) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Any:
lowerCamelCase_ =FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> List[Any]:
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Dict:
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> List[Any]:
lowerCamelCase_ =super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def _snake_case ( self )-> Optional[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> int:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> Optional[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """traced_model.pt""" ) )
lowerCamelCase_ =torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , """traced_model.pt""" ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict["""input_ids"""].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["""attention_mask"""].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
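# Note (added for clarity; not part of the original file): the slice comparison above is
# the standard integration-test pattern -- run a pinned checkpoint on a fixed input and
# compare a small block of hidden states against hard-coded values with atol=1e-4 to
# catch numerical regressions.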
| 154 |
| 154 | 1 |
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of one or more
    words from `words` (trie lookup + memoised DP over start indices).
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
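# Illustrative demo (added; not part of the original file):
if __name__ == "__main__":
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False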
| 356 | def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
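# Illustrative demo (added; not part of the original file):
if __name__ == "__main__":
    print(split("apple#banana#cherry#orange", separator="#"))
    # ['apple', 'banana', 'cherry', 'orange']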
| 206 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Any=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=10 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[str]=2 , ) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =patch_size
lowerCamelCase__: Optional[int] =max_length
lowerCamelCase__: Dict =num_mel_bins
lowerCamelCase__: Union[str, Any] =is_training
lowerCamelCase__: List[Any] =use_labels
lowerCamelCase__: List[Any] =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: str =num_attention_heads
lowerCamelCase__: Any =intermediate_size
lowerCamelCase__: Optional[int] =hidden_act
lowerCamelCase__: int =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Tuple =type_sequence_label_size
lowerCamelCase__: Tuple =initializer_range
lowerCamelCase__: str =scope
lowerCamelCase__: str =frequency_stride
lowerCamelCase__: Optional[Any] =time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__: Union[str, Any] =(self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowerCamelCase__: Optional[Any] =(self.max_length - self.patch_size) // self.time_stride + 1
lowerCamelCase__: Optional[int] =frequency_out_dimension * time_out_dimension
lowerCamelCase__: int =num_patches + 2
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: List[str] =floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
lowerCamelCase__: str =None
if self.use_labels:
lowerCamelCase__: int =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: Optional[int] =self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =ASTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: str =model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Tests the AST model and its audio-classification head.
    """

    all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->str:
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Any:
'''simple docstring'''
lowerCamelCase__: str =ASTModelTester(self)
lowerCamelCase__: Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: List[Any] =model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCamelCase__: Union[str, Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Tuple =model_class(UpperCAmelCase_)
lowerCamelCase__: str =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase__: List[Any] =["input_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: List[Any] =ASTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
lowerCamelCase__: int =hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
lowerCamelCase__: Optional[int] =torchaudio.load(lowerCamelCase__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Any =self.default_feature_extractor
lowerCamelCase__: Tuple =ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.default_feature_extractor
lowerCamelCase__: Any =prepare_audio()
lowerCamelCase__: Tuple =audio.squeeze().numpy()
lowerCamelCase__: Dict =feature_extractor(UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: Union[str, Any] =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: int =torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: List[Any] =torch.tensor([-0.8760, -7.0042, -8.6602]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
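# Note (added for clarity; not part of the original file): the (1, 527) logits shape in
# the integration test matches the 527 sound-event classes of AudioSet, the dataset the
# "MIT/ast-finetuned-audioset-10-10-0.4593" checkpoint was fine-tuned on.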
| 10 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
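# Illustrative invocation (added; not part of the original file; the script path is
# hypothetical):
#
#     python xla_spawn.py --num_cores 8 path/to/run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the target script as a module and hands its `_mp_fn` to
# `xmp.spawn`, which forks one process per TPU core.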
| 130 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
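# Note (added for clarity; not part of the original file): replacing the module in
# `sys.modules` with a `_LazyModule` defers the heavy torch imports until an attribute
# such as `MegatronBertModel` is first accessed, while the `TYPE_CHECKING` branch keeps
# static analyzers aware of the real symbols.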
| 352 |
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Return True if `pattern` occurs in `text`, using a rolling polynomial hash:
    1) hash the pattern and the first window of the text;
    2) slide the window one character at a time, updating the hash in O(1) and
       comparing characters only when the hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue

        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue

        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus

    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
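# Complexity note and demo (added; not part of the original file). Expected running time
# is O(t_len + p_len): each shift updates the rolling hash in O(1), and characters are
# compared only on hash collisions (worst case O(t_len * p_len)).
if __name__ == "__main__":
    print(rabin_karp("abc", "zzabczz"))  # True
    print(rabin_karp("abd", "zzabczz"))  # False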
| 44 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Tuple:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__UpperCamelCase , )
assert hasattr(self , 'env' )
def __a ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Tuple = {
'enabled': True,
'processes_per_host': 8,
}
snake_case__ : Any = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
snake_case__ : Optional[int] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
snake_case__ : int = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='py36' , )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
TrainingJobAnalytics(__UpperCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.create_estimator(__UpperCamelCase )
# run training
estimator.fit()
# result dataframe
snake_case__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
snake_case__ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __UpperCamelCase )
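# Illustrative sketch (not part of the original suite): `parameterized_class`
# works by stamping each configuration dict onto a generated copy of the
# TestCase as class attributes, which is why `self.framework`, `self.script`
# and `self.results` resolve above. A hand-rolled analogue of that mechanism:
def _stamped_testcase_demo():
    import unittest

    class _Base(unittest.TestCase):
        def test_has_framework(self):
            self.assertEqual(self.framework, "pytorch")

    # type() builds a subclass whose class attributes come from the config
    # dict -- essentially what parameterized_class generates per entry.
    return type("_BasePytorch", (_Base,), {"framework": "pytorch"})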
| 143 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : Optional[Any] = controlnet_params
snake_case__ : Any = 'bird'
snake_case__ : Any = jax.device_count()
snake_case__ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
snake_case__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ : Any = jax.random.PRNGKey(0 )
snake_case__ : Dict = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : Any = replicate(__UpperCamelCase )
snake_case__ : Union[str, Any] = shard(__UpperCamelCase )
snake_case__ : Any = shard(__UpperCamelCase )
snake_case__ : Dict = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : str = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ , snake_case__ : Union[str, Any] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : List[str] = controlnet_params
snake_case__ : Optional[Any] = 'Chef in the kitchen'
snake_case__ : List[Any] = jax.device_count()
snake_case__ : int = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
snake_case__ : int = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ : Optional[Any] = jax.random.PRNGKey(0 )
snake_case__ : Any = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : List[Any] = replicate(__UpperCamelCase )
snake_case__ : List[str] = shard(__UpperCamelCase )
snake_case__ : Optional[int] = shard(__UpperCamelCase )
snake_case__ : Any = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : Any = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
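# Illustrative sketch (not part of the original test): the replicate/shard
# calls above set up jax.pmap-style data parallelism -- parameters are copied
# to every device while batched inputs gain a leading axis of length
# jax.device_count(), one slice per device.
def _pmap_demo():
    import jax
    import jax.numpy as jnp

    n = jax.device_count()
    batch = jnp.arange(n * 4.0).reshape(n, 4)  # one row per device
    return jax.pmap(lambda x: x * 2.0)(batch)  # each device doubles its row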
| 143 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = '''true'''
def lowerCamelCase_ ( _a : List[Any] , _a : List[str]=82 , _a : Tuple=16 ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ : int = RegressionModel()
UpperCAmelCase_ : List[Any] = deepcopy(_a )
UpperCAmelCase_ : Tuple = RegressionDataset(length=_a )
UpperCAmelCase_ : int = DataLoader(_a , batch_size=_a )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(_a , _a )
return model, ddp_model, dataloader
def lowerCamelCase_ ( _a : Accelerator , _a : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
UpperCAmelCase_ : int = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(_a : str ):
UpperCAmelCase_ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_a , max_length=_a )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
_a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_a : List[str] ):
if use_longest:
return tokenizer.pad(_a , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(_a , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def lowerCamelCase_ ( _a : Any , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : int = Accelerator(dispatch_batches=_a , split_batches=_a )
UpperCAmelCase_ : Dict = get_dataloader(_a , not dispatch_batches )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_a )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase_ ( _a : Optional[int] , _a : Optional[Any] , _a : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = batch.values()
with torch.no_grad():
UpperCAmelCase_ : str = model(_a )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.cat(_a ), torch.cat(_a )
return logits, targs
def lowerCamelCase_ ( _a : Accelerator , _a : str=82 , _a : str=False , _a : Dict=False , _a : Dict=16 ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_basic_setup(_a , _a , _a )
UpperCAmelCase_ , UpperCAmelCase_ : Any = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}'''
def lowerCamelCase_ ( _a : bool = False , _a : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = evaluate.load("""glue""" , """mrpc""" )
UpperCAmelCase_ , UpperCAmelCase_ : str = get_mrpc_setup(_a , _a )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = setup["""no"""]
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**_a )
UpperCAmelCase_ : Any = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch["""labels"""] )
UpperCAmelCase_ : str = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : List[str] = model(**_a )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Union[str, Any] = batch["""labels"""]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
UpperCAmelCase_ : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[int] = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
UpperCAmelCase_ : str = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def lowerCamelCase_ ( _a : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
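# Illustrative sketch (not part of the original script): `gather_for_metrics`
# all-gathers every process's shard and drops the duplicate samples a
# distributed sampler appends to even out the final batch, so each example is
# counted exactly once. A single-process analogue of the trimming step:
def _trim_gathered_demo(shards, num_samples):
    import torch

    gathered = torch.cat(shards)  # concatenate per-process shards
    return gathered[:num_samples]  # drop the duplicated padding samples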
| 59 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : List[str] , __snake_case : Tuple=13 , __snake_case : Optional[Any]=7 , __snake_case : Optional[int]=True , __snake_case : int=True , __snake_case : Optional[int]=True , __snake_case : str=True , __snake_case : str=99 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Optional[int]=4 , __snake_case : List[str]=37 , __snake_case : Any="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : Dict=0.1 , __snake_case : Dict=512 , __snake_case : List[str]=16 , __snake_case : Any=2 , __snake_case : List[str]=0.02 , __snake_case : str=False , __snake_case : str=True , __snake_case : List[str]="None" , __snake_case : List[Any]=3 , __snake_case : Optional[Any]=4 , __snake_case : Tuple=None , ) -> List[str]:
UpperCAmelCase : int = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Union[str, Any] = use_input_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : List[str] = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : Union[str, Any] = type_vocab_size
UpperCAmelCase : int = type_sequence_label_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : Optional[Any] = num_labels
UpperCAmelCase : int = num_choices
UpperCAmelCase : List[Any] = relative_attention
UpperCAmelCase : Any = position_biased_input
UpperCAmelCase : Any = pos_att_type
UpperCAmelCase : Optional[int] = scope
def A ( self : Optional[int] ) -> List[str]:
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[Any] ) -> Union[str, Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A ( self : int ) -> List[str]:
UpperCAmelCase : Dict = self.get_config()
UpperCAmelCase : int = 300
return config
def A ( self : Optional[int] , __snake_case : str ) -> List[str]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def A ( self : Optional[int] , __snake_case : Dict , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Dict , __snake_case : str ) -> List[Any]:
UpperCAmelCase : Optional[int] = DebertaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )[0]
UpperCAmelCase : Union[str, Any] = model(__snake_case , token_type_ids=__snake_case )[0]
UpperCAmelCase : List[Any] = model(__snake_case )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def A ( self : Dict , __snake_case : Dict , __snake_case : str , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : List[Any] ) -> str:
UpperCAmelCase : Tuple = DebertaForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Tuple , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : int , __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : Optional[int] = DebertaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Optional[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__snake_case )
def A ( self : Tuple , __snake_case : Tuple , __snake_case : List[str] , __snake_case : int , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Any ) -> Tuple:
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : Tuple = DebertaForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> List[Any]:
UpperCAmelCase : int = DebertaForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Any ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def A ( self : Optional[int] ) -> int:
UpperCAmelCase : Optional[Any] = DebertaModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def A ( self : Dict ) -> Any:
self.config_tester.run_common_tests()
def A ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__snake_case )
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__snake_case )
def A ( self : Tuple ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__snake_case )
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__snake_case )
def A ( self : List[str] ) -> List[str]:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__snake_case )
@slow
def A ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : int = DebertaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def A ( self : Tuple ) -> str:
pass
@slow
def A ( self : Any ) -> str:
UpperCAmelCase : Tuple = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase : Optional[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
UpperCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase : Any = model(__snake_case , attention_mask=__snake_case )[0]
# compare the actual values for a slice.
UpperCAmelCase : Optional[int] = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
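# Note (illustrative): integration tests like the one above pin a small logits
# slice against hard-coded reference values with a loose tolerance (atol=1e-4),
# so minor numerical drift across hardware does not make the check flake.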
| 23 |
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Find the rank of a matrix via in-place Gaussian elimination.

    >>> rank_of_matrix([[1, 0], [0, 1]])
    2
    >>> rank_of_matrix([[1, 2], [2, 4]])
    1
    """
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and retry: decrementing the loop variable
            # of a `for row in range(...)` loop is a no-op, hence the while.
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
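# Worked example (illustrative): for [[1, 2], [2, 4]] the elimination step
# subtracts 2x row 0 from row 1, leaving [0, 0]; no pivot then exists for
# column 1, so the rank collapses from min(2, 2) == 2 down to 1.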
| 128 | 0 |
def _print_dist(dist: list[list[float]], v: int) -> None:
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph: list[list[float]], v: int):
    """All-pairs shortest paths; returns the distance matrix and vertex count."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
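# Minimal programmatic demo (illustrative): bypasses the interactive input()
# flow above by calling floyd_warshall directly on a two-vertex graph with
# edges 0 -(2.0)-> 1 and 1 -(1.0)-> 0.
def _floyd_warshall_demo():
    g = [[0.0, 2.0], [1.0, 0.0]]
    dist, _ = floyd_warshall(g, 2)
    assert dist[0][1] == 2.0 and dist[1][0] == 1.0
    return dist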
| 120 |
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail story files used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self) -> int:
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad ``sequence`` so it is exactly ``block_size`` long."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating segment ids, flipping at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
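# Worked example (illustrative, not part of the original module): CNN/DailyMail
# stories are plain text where "@highlight" markers separate the article from
# its summary bullets.
def _process_story_demo():
    raw = "First sentence.\nSecond sentence\n@highlight\nthe summary"
    story, summary = process_story(raw)
    assert story == ["First sentence.", "Second sentence."]  # period added
    assert summary == ["the summary."]
    return story, summary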
| 120 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : str = CanineTokenizer
_lowerCamelCase : Tuple = False
def lowercase ( self : List[Any] ):
super().setUp()
_UpperCAmelCase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : List[str] ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowercase ( self : Union[str, Any] , **snake_case_ : List[Any] ):
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
_UpperCAmelCase = 1_0_2_4
return tokenizer
@require_torch
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
_UpperCAmelCase = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
_UpperCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_UpperCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
_UpperCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , snake_case_ )
self.assertIn("attention_mask" , snake_case_ )
self.assertIn("token_type_ids" , snake_case_ )
@require_torch
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = [
"What's the weater?",
"It's about 25 degrees.",
]
_UpperCAmelCase = tokenizer(
text_target=snake_case_ , max_length=3_2 , padding="max_length" , truncation=snake_case_ , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
def lowercase ( self : Union[str, Any] ):
# safety check on max_len default value so we are sure the test works
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ )
_UpperCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
shutil.rmtree(snake_case_ )
_UpperCAmelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
_UpperCAmelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_UpperCAmelCase = chr(0Xe0_07 )
additional_special_tokens.append(snake_case_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ )
_UpperCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertIn(snake_case_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_UpperCAmelCase , _UpperCAmelCase = self.get_clean_sequence(snake_case_ )
# a special token for Canine can be defined as follows:
_UpperCAmelCase = 0Xe0_05
_UpperCAmelCase = chr(snake_case_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
_UpperCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=snake_case_ )
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , input_encoded + special_token_id )
_UpperCAmelCase = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertTrue(special_token not in decoded )
def lowercase ( self : int ):
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_UpperCAmelCase = chr(0Xe0_05 )
_UpperCAmelCase = chr(0Xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=snake_case_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
_UpperCAmelCase = tokenizer.tokenize(snake_case_ )
_UpperCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
self.assertEqual(len(snake_case_ ) , 1 )
self.assertEqual(token_a[0] , snake_case_ )
self.assertEqual(token_a[0] , snake_case_ )
@require_tokenizers
def lowercase ( self : Any ):
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
_UpperCAmelCase = 0Xe0_06
_UpperCAmelCase = chr(snake_case_ )
_UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(snake_case_ )
tokenizer.from_pretrained(snake_case_ )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
_UpperCAmelCase = json.load(snake_case_ )
with open(os.path.join(snake_case_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
_UpperCAmelCase = json.load(snake_case_ )
# a special token for Canine can be defined as follows:
_UpperCAmelCase = 0Xe0_06
_UpperCAmelCase = chr(snake_case_ )
_UpperCAmelCase = [new_token_a]
_UpperCAmelCase = [new_token_a]
with open(os.path.join(snake_case_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case_ , snake_case_ )
with open(os.path.join(snake_case_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case_ , snake_case_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_UpperCAmelCase = tokenizer_class.from_pretrained(snake_case_ , extra_ids=0 )
self.assertIn(snake_case_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_UpperCAmelCase = 0Xe0_07
_UpperCAmelCase = chr(snake_case_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_UpperCAmelCase = [AddedToken(snake_case_ , lstrip=snake_case_ )]
_UpperCAmelCase = tokenizer_class.from_pretrained(
snake_case_ , additional_special_tokens=snake_case_ , extra_ids=0 )
self.assertIn(snake_case_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_UpperCAmelCase = "hello world"
if self.space_between_special_tokens:
_UpperCAmelCase = "[CLS] hello world [SEP]"
else:
_UpperCAmelCase = input
_UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
_UpperCAmelCase = tokenizer.decode(snake_case_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(snake_case_ , [output, output.lower()] )
def lowercase ( self : str ):
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_UpperCAmelCase = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
_UpperCAmelCase = "a"
_UpperCAmelCase = ord(snake_case_ )
for attr in attributes_list:
setattr(snake_case_ , attr + "_id" , snake_case_ )
self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(getattr(snake_case_ , attr + "_id" ) , snake_case_ )
setattr(snake_case_ , attr + "_id" , snake_case_ )
self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(getattr(snake_case_ , attr + "_id" ) , snake_case_ )
setattr(snake_case_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(snake_case_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(snake_case_ , "additional_special_tokens_ids" ) , [] )
_UpperCAmelCase = 0Xe0_06
_UpperCAmelCase = chr(snake_case_ )
setattr(snake_case_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(snake_case_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(snake_case_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowercase ( self : Any ):
pass
def lowercase ( self : List[Any] ):
pass
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : List[Any] ):
pass
def lowercase ( self : List[Any] ):
pass
def lowercase ( self : int ):
pass
def lowercase ( self : int ):
pass
def lowercase ( self : Optional[Any] ):
pass
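# Note (illustrative): CANINE tokenizes at the Unicode code-point level rather
# than over a learned vocabulary, which is why the tests above mint new
# special tokens directly with chr(0xE005)-style private-use code points
# instead of adding entries to a vocab file.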
| 22 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = embeddings_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = scope
_snake_case = len(_lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Tuple ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = TFResNetModel(config=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
_snake_case = self.num_labels
_snake_case = TFResNetForImageClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__a = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : List[Any] ):
_snake_case = TFResNetModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def lowercase ( self : List[str] ):
pass
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = model_class(_lowerCamelCase )
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _UpperCAmelCase ( ) -> Union[str, Any]:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
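# Note (illustrative): the hidden-state shape assertions above rely on the
# ResNet stem downsampling by a factor of 4 (strided conv plus pooling), so
# the first feature map is image_size // 4 on each spatial side, and the last
# hidden state is image_size // 32 after the remaining stages.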
| 288 | 0 |
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
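# Note (illustrative): set_fail_transitions is a BFS over the keyword trie;
# each node's fail link points at the longest proper suffix of its path that
# is also a trie prefix, and outputs are merged along fail links, which lets
# search_in report every match in a single left-to-right scan of the text.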
| 145 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
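# Hedged usage sketch (the checkpoint and task below are illustrative): the ONNX
# config pairs with a processor to build the dummy tensors the exporter traces.
# from transformers import LayoutLMv3Processor
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
# dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework="pt")
# print({name: tuple(t.shape) for name, t in dummy_inputs.items()})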
| 145 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
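# Hedged usage note: this script is written to be launched through the accelerate
# CLI (for example `accelerate launch --num_processes 2 <this_script>.py`, where
# the script name is a placeholder), since gather_for_metrics() only has
# duplicated or padded samples to strip when more than one process is running.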
| 196 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
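    # Hedged demo of the restored `hex_to_bin` helper:
    print(hex_to_bin("AC"))   # -> 10101100
    print(hex_to_bin("-ff"))  # -> -11111111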
    doctest.testmod()
| 282 | 0
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
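# Hedged usage sketch (the class is restored above as `CircularQueue`; upstream
# documents it with doctests rather than a demo):
if __name__ == "__main__":
    queue = CircularQueue(5)
    queue.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
    print(len(queue), queue.first())  # -> 2 a
    print(queue.dequeue(), queue.dequeue())  # -> a b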
| 369 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 229 | 0 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
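# Hedged usage sketch (the checkpoint path is illustrative): at inference time the
# extra tuple entries restored above expose the entropies used for early exit.
# model = DeeRobertaForSequenceClassification.from_pretrained("path/to/deebert-checkpoint")
# model.eval()
# outputs = model(input_ids, attention_mask=attention_mask)
# logits = outputs[0]
# (original_entropy, highway_entropy), exit_layer = outputs[-2], outputs[-1]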
| 25 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and perform the inverse transform
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
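    # Hedged demo (the class is restored above as `FFT`): multiply A(x) = 1 + 2x + 3x^2
    # by B(x) = 1 + x; the product coefficients should come out as 1, 3, 5, 3,
    # printed by __str__ as rounded complex numbers.
    fft = FFT(poly_a=[1, 2, 3], poly_b=[1, 1])
    print(fft)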
| 25 | 1 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 177 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
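# Hedged usage note: like the other image-processor suites, these tests are meant
# to be collected by pytest, e.g. `pytest <path_to_this_test_file> -k "call"`
# (the repository path is a placeholder, since it is not shown here).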
| 177 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
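# Hedged usage sketch of the validation above (the values are illustrative):
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError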
| 241 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
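# Hedged sketch of the dummy-object pattern above (the class name is illustrative,
# since the original names were lost): importing a placeholder succeeds, but any
# instantiation or classmethod call raises a clear error asking for the missing
# backend instead of failing at import time.
# SomeFlaxModel()                      # -> raises: ... requires the flax library
# SomeFlaxModel.from_pretrained("x")   # -> same error, via the classmethod stub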
| 241 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 353 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 164 | 0 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant; valid values are 0.04 and 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        # Returns the annotated image and a list of detected corner positions
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 281 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        """
        Make sure [MASK] is decoded flush against the preceding token,
        mirroring the original BigBird implementation.
        """
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
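# For quick orientation (not part of the test file): BigBird wraps every encoded
# sequence in [CLS] (id 65) and [SEP] (id 66), which is why each expected id list
# above starts with 65 and ends with 66.
#
#     from transformers import BigBirdTokenizer
#     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     tok.encode("Hello World!")  # -> [65, 18536, 2260, 101, 66]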
| 281 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
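# For reference (not in the original file): make_test_data_dir produces the layout
# Seq2SeqDataset expects:
#     <tmp_dir>/train.source  <tmp_dir>/train.target
#     <tmp_dir>/val.source    <tmp_dir>/val.target
#     <tmp_dir>/test.source   <tmp_dir>/test.target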
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
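    # A rough sketch (not the actual pack_data_dir implementation) of the greedy
    # packing idea the assertions above rely on: adjacent examples are merged as
    # long as the merged text still fits within `max_tokens` once tokenized.
    #
    #     def greedy_pack(lines, tokenizer, max_tokens):
    #         packed, current = [], ""
    #         for line in lines:
    #             candidate = (current + " " + line) if current else line
    #             if len(tokenizer.encode(candidate)) <= max_tokens:
    #                 current = candidate
    #             else:
    #                 packed.append(current)
    #                 current = line
    #         if current:
    #             packed.append(current)
    #         return packed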
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(src_shape)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
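# A minimal sketch (not in the test file) of the mBART convention checked in
# test_seq2seq_dataset_truncation: shift_tokens_right rotates the language code
# from the end of the labels to the front of decoder_input_ids. The token ids
# below are illustrative (2 is </s>, 250004 stands in for a language code).
#
#     import torch
#     from transformers.models.mbart.modeling_mbart import shift_tokens_right
#
#     labels = torch.tensor([[47, 112, 2, 250004]])
#     shift_tokens_right(labels, pad_token_id=1)  # -> tensor([[250004, 47, 112, 2]])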
| 371 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if indicated).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
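# An alternative sketch (not part of the original file) using a regex substitution;
# it behaves the same for simple snake_case inputs but performs no validation:
#
#     import re
#     re.sub(r"_(\w)", lambda m: m.group(1).upper(), "some_random_string")
#     # -> 'someRandomString'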
| 60 | 0 |