Dataset schema (viewer header): code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 or 1)
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
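
# Hedged usage sketch (not part of the original script): the converter can
# also be called directly from Python. Every path below is a placeholder for
# files produced by the original LUKE repository, not a shipped file name.
def _example_luke_conversion():
    convert_luke_checkpoint(
        checkpoint_path="luke_base/pytorch_model.bin",
        metadata_path="luke_base/metadata.json",
        entity_vocab_path="luke_base/entity_vocab.tsv",
        pytorch_dump_folder_path="converted_luke_base",
        model_size="base",
    )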
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Try to match the regexes against any contiguous window of ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
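
# Hedged illustration (not in the original file): the rules above annotate a
# flattened GPT-style parameter tree. The toy tree below is an assumption for
# demonstration; only the key paths matter for rule matching, not the leaves.
def _example_partitioning():
    params = {
        "transformer": {
            "wte": {"embedding": 0},
            "h": {"0": {"attention": {"out_proj": {"kernel": 0, "bias": 0}}}},
        }
    }
    specs = set_partitions(params)
    # specs["transformer"]["wte"]["embedding"] -> PartitionSpec("mp", None)
    # ...["attention"]["out_proj"]["kernel"]   -> PartitionSpec("mp", None)
    # ...["attention"]["out_proj"]["bias"]     -> None
    return specs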
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
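
# Hedged worked example (the original driver above is interactive): shortest
# distances from vertex 0 on a small assumed edge list.
def _example_bellman_ford():
    example_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
        {"src": 1, "dst": 3, "weight": 1},
    ]
    return bellman_ford(example_graph, 4, 4, 0)  # -> [0.0, 3.0, 1.0, 4.0]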
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
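
# A hedged sketch of the idea behind `_LazyModule` (a simplified stand-in
# built on stdlib importlib, not the Transformers implementation): importing
# the package stays cheap because each submodule is only resolved on first
# attribute access.
import importlib


class _ToyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # invert the mapping: attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name):
        module = importlib.import_module("." + self._attr_to_module[name], self._package)
        return getattr(module, name)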
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A = """
Human: <<task>>
Assistant: """
A = """huggingface-tools/default-prompts"""
A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]:
"""simple docstring"""
if prompt_or_repo_id is None:
__UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , UpperCamelCase ) is not None:
return prompt_or_repo_id
__UpperCAmelCase : str = cached_file(
UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(UpperCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
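
# Hedged usage sketch: anything containing whitespace is treated as the
# prompt itself, while a bare repo ID is fetched from the Hub as a dataset.
def _example_prompt_resolution():
    inline = download_prompt("Answer the question: <<task>>", agent_name="my-agent")
    return inline  # returned unchanged, since the string contains spaces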
"""simple docstring"""
import heapq
import sys
import numpy as np
a : Dict = tuple[int, int]
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
UpperCAmelCase__ = []
UpperCAmelCase__ = set()
def A__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def A__ ( self ):
return len(self.elements ) == 0
def A__ ( self , __lowercase , __lowercase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__lowercase )
else:
# update
# print("update", item)
UpperCAmelCase__ = []
((UpperCAmelCase__) , (UpperCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((UpperCAmelCase__) , (UpperCAmelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def A__ ( self , __lowercase ):
if item in self.set:
self.set.remove(__lowercase )
UpperCAmelCase__ = []
((UpperCAmelCase__) , (UpperCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((UpperCAmelCase__) , (UpperCAmelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def A__ ( self ):
return self.elements[0][1]
def A__ ( self ):
((UpperCAmelCase__) , (UpperCAmelCase__)) = heapq.heappop(self.elements )
self.set.remove(__lowercase )
return (priority, item)
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
# euclidean distance
UpperCAmelCase__ = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = np.array(_SCREAMING_SNAKE_CASE )
return np.linalg.norm(a - b )
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
# integer division by time variable
return consistent_heuristic(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) // t
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
UpperCAmelCase__ = g_function[start] + Wa * heuristics[i](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return ans
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
UpperCAmelCase__ = np.chararray((n, n) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = """*"""
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if (j, (n - 1) - i) in blocks:
UpperCAmelCase__ = """#"""
UpperCAmelCase__ = """-"""
UpperCAmelCase__ = back_pointer[goal]
while x != start:
((UpperCAmelCase__) , (UpperCAmelCase__)) = x
# print(x)
UpperCAmelCase__ = """-"""
UpperCAmelCase__ = back_pointer[x]
UpperCAmelCase__ = """-"""
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
UpperCAmelCase__ = back_pointer[goal]
while x != start:
print(_SCREAMING_SNAKE_CASE , end=""" """ )
UpperCAmelCase__ = back_pointer[x]
print(_SCREAMING_SNAKE_CASE )
sys.exit()
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Dict:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->str:
for itera in range(_SCREAMING_SNAKE_CASE ):
open_list[itera].remove_element(_SCREAMING_SNAKE_CASE )
# print("s", s)
# print("j", j)
((UpperCAmelCase__) , (UpperCAmelCase__)) = s
UpperCAmelCase__ = (x - 1, y)
UpperCAmelCase__ = (x + 1, y)
UpperCAmelCase__ = (x, y + 1)
UpperCAmelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_SCREAMING_SNAKE_CASE ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = -1
UpperCAmelCase__ = float("""inf""" )
if valid(_SCREAMING_SNAKE_CASE ) and g_function[neighbours] > g_function[s] + 1:
UpperCAmelCase__ = g_function[s] + 1
UpperCAmelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(_SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if neighbours not in close_list_inad:
for var in range(1 , _SCREAMING_SNAKE_CASE ):
if key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) <= Wa * key(
_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
open_list[j].put(
_SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case__ ( ) ->Any:
UpperCAmelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
a : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a : Union[str, Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a : int = make_common_ground()
a : Tuple = blocks_blk
# hyper parameters
a : Optional[Any] = 1
a : List[str] = 1
a : Any = 20
a : Tuple = 3 # one consistent and two other inconsistent
# start and end destination
a : int = (0, 0)
a : str = (n - 1, n - 1)
a : Any = 1
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
UpperCAmelCase__ = {start: 0, goal: float("""inf""" )}
UpperCAmelCase__ = {start: -1, goal: -1}
UpperCAmelCase__ = []
UpperCAmelCase__ = set()
for i in range(_SCREAMING_SNAKE_CASE ):
open_list.append(PriorityQueue() )
open_list[i].put(_SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , _SCREAMING_SNAKE_CASE ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase__ , UpperCAmelCase__ = open_list[i].top_show()
visited.add(_SCREAMING_SNAKE_CASE )
expand_state(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
close_list_inad.append(_SCREAMING_SNAKE_CASE )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase__ = open_list[0].top_show()
visited.add(_SCREAMING_SNAKE_CASE )
expand_state(
_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
close_list_anchor.append(_SCREAMING_SNAKE_CASE )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
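
# Hedged illustration (not in the original file): how the three heuristics
# compare for the start cell against the (19, 19) goal defined above.
def _demo_heuristics():
    print(consistent_heuristic((0, 0), goal))  # Euclidean: ~26.87
    print(heuristic_1((0, 0), goal))           # Manhattan: 38
    print(heuristic_2((0, 0), goal))           # Euclidean // t: 26.0 (t == 1)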
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
UpperCAmelCase__ = old_name
if "patch_embed" in old_name:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = old_name.split(""".""" )
if layer == "0":
UpperCAmelCase__ = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
UpperCAmelCase__ = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
UpperCAmelCase__ = old_name.replace("""3""" , """convolution2""" )
else:
UpperCAmelCase__ = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(r"""\d\.\d""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = r"""\b\d{2}\b"""
if bool(re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__ = re.search(r"""\d\.\d\d.""" , _SCREAMING_SNAKE_CASE ).group()
else:
UpperCAmelCase__ = re.search(r"""\d\.\d.""" , _SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
UpperCAmelCase__ = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
UpperCAmelCase__ = """intermediate_stages.""" + trimmed_name
else:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
UpperCAmelCase__ = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc2""" , """linear_out""" )
UpperCAmelCase__ = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(r""".\d.""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
UpperCAmelCase__ = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
UpperCAmelCase__ = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
UpperCAmelCase__ = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
UpperCAmelCase__ = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
UpperCAmelCase__ = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase__ = new_name.replace("""norm""" , """layernorm""" )
UpperCAmelCase__ = """efficientformer.""" + new_name
else:
UpperCAmelCase__ = """efficientformer.encoder.""" + new_name
return new_name
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
for key in checkpoint.copy().keys():
UpperCAmelCase__ = checkpoint.pop(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = val
return checkpoint
def snake_case__ ( ) ->Optional[Any]:
UpperCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ = EfficientFormerConfig.from_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = EfficientFormerForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
UpperCAmelCase__ = config.depths[-1] - config.num_metaad_blocks + 1
UpperCAmelCase__ = convert_torch_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = 2_5_6
UpperCAmelCase__ = 2_2_4
UpperCAmelCase__ = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
UpperCAmelCase__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
# original processing pipeline
UpperCAmelCase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
] )
UpperCAmelCase__ = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = outputs.logits
UpperCAmelCase__ = (1, 1_0_0_0)
if "l1" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
a : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
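
# Hedged usage sketch (placeholder file names, not shipped artifacts): a
# direct call mirroring the CLI entry point above.
def _example_efficientformer_conversion():
    convert_efficientformer_checkpoint(
        checkpoint_path="efficientformer_l1_300d.pth",
        efficientformer_config_file="efficientformer_l1_config.json",
        pytorch_dump_path="efficientformer-l1-300",
        push_to_hub=False,
    )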
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # compare the input element-wise against 0 and return the maxima
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
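
# Companion sketch (not in the original file): the ReLU subgradient used in
# backpropagation; np.greater yields the 0/1 mask, with the derivative at 0
# conventionally taken as 0.
def relu_derivative(vector: list[float]):
    return np.greater(vector, 0).astype(float)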
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter).
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
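
# Hedged usage sketch: CANINE tokenizes at the character level, so input IDs
# are raw Unicode codepoints wrapped in the private-use specials above.
def _example_tokenization():
    tokenizer = CanineTokenizer(model_max_length=2048)
    ids = tokenizer("hello")["input_ids"]
    # -> [57344, 104, 101, 108, 108, 111, 57345], i.e. [CLS] h e l l o [SEP]
    return ids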
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
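
# Quick hedged sanity check of the latent-shape rule asserted in
# test_stable_diffusion_encode_sample: SD's VAE downsamples by 8x spatially
# and produces 4 latent channels.
def _expected_latent_shape(image_shape=(4, 3, 512, 512)):
    return [image_shape[0], 4] + [i // 8 for i in image_shape[2:]]  # [4, 4, 64, 64]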
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
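
# A few additional hedged examples of the matcher's semantics:
def _example_matches():
    assert match_pattern("aab", "c*a*b")  # "c*" matches empty, "a*" matches "aa"
    assert match_pattern("ab", ".*")  # ".*" matches any string
    assert not match_pattern("mississippi", "mis*is*p*.")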
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a__ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowercase , cache_dir=lowercase)
a__ : Any = [t[-1] for t in os.walk(os.path.join(lowercase , os.listdir(lowercase)[0] , 'snapshots'))]
a__ : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowercase)
a__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Tuple = jax.random.PRNGKey(0)
a__ : str = 4
a__ : Dict = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : Any = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : str = replicate(lowercase)
a__ : Dict = jax.random.split(lowercase , lowercase)
a__ : Dict = shard(lowercase)
a__ : List[str] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1e-3
assert np.abs(np.abs(lowercase , dtype=np.floataa).sum() - 4_99_47.8_75) < 5e-1
a__ : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(lowercase) == num_samples
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : int = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=lowercase)
a__ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[str] = jax.random.PRNGKey(0)
a__ : Any = 50
a__ : Tuple = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : Optional[int] = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : List[Any] = replicate(lowercase)
a__ : int = jax.random.split(lowercase , lowercase)
a__ : Optional[int] = shard(lowercase)
a__ : str = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5e-1
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ , a__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase)
a__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Optional[int] = jax.random.PRNGKey(0)
a__ : Dict = 50
a__ : List[Any] = jax.device_count()
a__ : Dict = num_samples * [prompt]
a__ : Any = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : Optional[Any] = replicate(lowercase)
a__ : List[Any] = jax.random.split(lowercase , lowercase)
a__ : Optional[Any] = shard(lowercase)
a__ : Optional[Any] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5e-1
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
a__ : Optional[int] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[Any] = jax.random.PRNGKey(0)
a__ : List[Any] = 50
a__ : int = jax.device_count()
a__ : Tuple = num_samples * [prompt]
a__ : Dict = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : int = replicate(lowercase)
a__ : List[str] = jax.random.split(lowercase , lowercase)
a__ : Optional[Any] = shard(lowercase)
a__ : Tuple = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5e-1
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=lowercase , steps_offset=1 , )
a__ , a__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=lowercase , safety_checker=lowercase , )
a__ : str = scheduler.create_state()
a__ : List[str] = scheduler_state
a__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[str] = jax.random.PRNGKey(0)
a__ : List[Any] = 50
a__ : Tuple = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : List[Any] = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : List[Any] = replicate(lowercase)
a__ : Any = jax.random.split(lowercase , lowercase)
a__ : Optional[int] = shard(lowercase)
a__ : Optional[int] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5e-1
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Optional[Any] = jax.device_count()
a__ : List[str] = num_samples * [prompt]
a__ : List[str] = jax.random.split(jax.random.PRNGKey(0) , lowercase)
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase , )
a__ : List[str] = replicate(lowercase)
a__ : int = pipeline.prepare_inputs(lowercase)
a__ : Dict = shard(lowercase)
a__ : Tuple = pipeline(lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
a__ : Tuple = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase , use_memory_efficient_attention=lowercase , )
a__ : int = replicate(lowercase)
a__ : str = pipeline.prepare_inputs(lowercase)
a__ : Dict = shard(lowercase)
a__ : int = pipeline(lowercase , lowercase , lowercase , jit=lowercase).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        a__ : Dict = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2
| 302 | 1 |
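A minimal sketch of the replicate/shard data-parallel pattern the Flax tests above exercise, assuming `jax`, `flax`, and `diffusers` are installed; the checkpoint and prompt are illustrative. `replicate` copies the weights onto every device and `shard` adds a leading device axis to the batch, so the jitted pipeline call runs one slice per device.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
)
prompts = ["a photo of an astronaut riding a horse"] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompts)            # (num_devices, seq_len)
params = replicate(params)                               # one weight copy per device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
prompt_ids = shard(prompt_ids)                           # add leading device axis
images = pipeline(prompt_ids, params, rng, num_inference_steps=50, jit=True).images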
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( a__ ):
'''simple docstring'''
__A = """gptsan-japanese"""
__A = [
"""past_key_values""",
]
__A = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , lowercase_ : Tuple=36000 , lowercase_ : Any=1280 , lowercase_ : Optional[int]=1024 , lowercase_ : Optional[Any]=8192 , lowercase_ : int=4096 , lowercase_ : Optional[Any]=128 , lowercase_ : List[str]=10 , lowercase_ : Any=0 , lowercase_ : Dict=16 , lowercase_ : List[str]=16 , lowercase_ : str=128 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Tuple=1e-5 , lowercase_ : Union[str, Any]=False , lowercase_ : List[Any]=0.0 , lowercase_ : Dict="float32" , lowercase_ : List[Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=False , lowercase_ : Dict=0.0_02 , lowercase_ : Tuple=False , lowercase_ : Any=True , lowercase_ : Optional[int]=35998 , lowercase_ : Optional[int]=35995 , lowercase_ : str=35999 , **lowercase_ : str , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = d_ff
_UpperCamelCase = d_ext
_UpperCamelCase = d_spout
_UpperCamelCase = num_switch_layers
_UpperCamelCase = num_ext_layers
_UpperCamelCase = num_switch_layers + num_ext_layers
_UpperCamelCase = num_heads
_UpperCamelCase = num_experts
_UpperCamelCase = expert_capacity
_UpperCamelCase = dropout_rate
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = router_bias
_UpperCamelCase = router_jitter_noise
_UpperCamelCase = router_dtype
_UpperCamelCase = router_ignore_padding_tokens
_UpperCamelCase = output_hidden_states
_UpperCamelCase = output_attentions
_UpperCamelCase = initializer_factor
_UpperCamelCase = output_router_logits
_UpperCamelCase = use_cache
super().__init__(
separator_token_id=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 709 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE_: Any =False
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __A :
def __init__(self : int , __a : str = None , __a : list = [] ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = choices
UpperCAmelCase_ = prompt
if sys.platform == "win32":
UpperCAmelCase_ = "*"
else:
UpperCAmelCase_ = "➔ "
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def _lowercase (self : Any , __a : int ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
UpperCAmelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a )
move_cursor(__a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowercase (self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowercase (self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowercase (self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowercase (self : str ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def _lowercase (self : Optional[Any] , __a : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
| 78 | 0 |
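The selector above (names obfuscated to `__A` / `_lowercase`) falls back to a plain numbered prompt when running in Colab. Below is a self-contained, hedged sketch of that fallback path; the function name is illustrative, not the library's API.

def numbered_menu(prompt: str, choices: list[str], default: int = 0) -> int:
    # Print the choices, read an index from stdin, fall back to the default on bad input.
    print(prompt)
    for i, choice in enumerate(choices):
        print(f"  [{i}] {choice}")
    try:
        index = int(input("Please input a choice index (starting from 0): "))
    except ValueError:
        index = default
    return index if 0 <= index < len(choices) else default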
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = CLIPTokenizer
UpperCamelCase = CLIPTokenizerFast
UpperCamelCase = True
UpperCamelCase = {}
UpperCamelCase = False
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
# fmt: off
lowerCamelCase_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCamelCase_ = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
lowerCamelCase_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
lowerCamelCase_ = {"""unk_token""": """<unk>"""}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase__ ) )
def a__ ( self : str , **A_ : Tuple ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def a__ ( self : str , **A_ : Any ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def a__ ( self : List[str] , A_ : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = """lower newer"""
lowerCamelCase_ = """lower newer"""
return input_text, output_text
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = """lower newer"""
lowerCamelCase_ = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
lowerCamelCase_ = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
@require_ftfy
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
lowerCamelCase_ = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ )
lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCamelCase_ = """xa\u0303y""" + """ """ + """x\xe3y"""
lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ )
lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of space type
lowerCamelCase_ = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ )
lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of line break type
lowerCamelCase_ = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ )
lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
lowerCamelCase_ = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
lowerCamelCase_ = f""" {text}"""
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
lowerCamelCase_ = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(lowercase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
| 704 |
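A hedged illustration of the offset-mapping behaviour the last test above checks: fast tokenizers can return per-token (start, end) character spans, and a leading space shifts every span by one. The public checkpoint name is an assumption standing in for the test's local toy tokenizer, and running this requires network access.

from transformers import CLIPTokenizerFast

tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)   # e.g. [(0, 5), (6, 11)] -- spans into the input string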
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
lowerCamelCase_ = len(lowercase )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase_ = 0
print(lowercase , end=',' )
# Consider rest of the activities
for j in range(lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase , end=',' )
lowerCamelCase_ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5]
lowerCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 651 | 0 |
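An idiomatic variant of the greedy algorithm above. The printed version assumes the activities are already sorted by finish time; this sketch sorts first and returns the selected indices instead of printing them.

def select_activities(start: list[int], finish: list[int]) -> list[int]:
    order = sorted(range(len(start)), key=lambda k: finish[k])   # earliest finish first
    selected = [order[0]]
    last_finish = finish[order[0]]
    for k in order[1:]:
        if start[k] >= last_finish:   # compatible with the last chosen activity
            selected.append(k)
            last_finish = finish[k]
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]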
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = Mock()
__lowercase = conn, Mock()
__lowercase = iter([1, None] )
__lowercase = lambda A__ : next(A__ )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=A__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 41 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase ) -> list[int]:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : List[Any] = int(math.sqrt(lowerCAmelCase ) ) # Size of every segment
UpperCAmelCase__ : Dict = [True] * (end + 1)
UpperCAmelCase__ : str = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase )
for i in range(start * start , end + 1 , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[Any] = end + 1
UpperCAmelCase__ : Dict = min(2 * end , lowerCAmelCase )
while low <= n:
UpperCAmelCase__ : Optional[int] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : Optional[int] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase , high + 1 , lowerCAmelCase ):
UpperCAmelCase__ : int = False
for j in range(len(lowerCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Dict = high + 1
UpperCAmelCase__ : int = min(high + end , lowerCAmelCase )
return prime
print(sieve(10**6))
| 182 | 0 |
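The two-phase loop above is a segmented sieve: it first sieves up to sqrt(n), then marks composites one window of about sqrt(n) numbers at a time, keeping memory near O(sqrt(n)) instead of O(n). A quick cross-check against a naive one-pass sieve (this assumes the function above is exposed as `sieve`, matching its final print call):

def naive_sieve(n: int) -> list[int]:
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for m in range(p * p, n + 1, p):
                is_prime[m] = False
    return [p for p, flag in enumerate(is_prime) if flag]

assert sieve(10_000) == naive_sieve(10_000)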
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter image url: ''').strip()
print(f'''Downloading image from {url} ...''')
lowerCAmelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCAmelCase__ = requests.get(image_url).content
lowerCAmelCase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 544 |
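A hedged hardening of the scraper above (the obfuscated `from bsa import BeautifulSoup` is `bs4` in reality): check the HTTP status and guard against pages with no og:image tag before touching the disk. The function name is illustrative.

import requests
from bs4 import BeautifulSoup

def fetch_og_image(url: str) -> bytes:
    page = requests.get(url, timeout=10)
    page.raise_for_status()                       # fail fast on 4xx/5xx
    tag = BeautifulSoup(page.content, "html.parser").find("meta", {"property": "og:image"})
    if tag is None or not tag.get("content"):
        raise ValueError("page has no og:image meta tag")
    image = requests.get(tag["content"], timeout=10)
    image.raise_for_status()
    return image.content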
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__a , speech_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , feature_extractor=__a , )
def snake_case_ (self , __a = "auto" ) -> str:
if slice_size == "auto":
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def snake_case_ (self ) -> List[Any]:
self.enable_attention_slicing(__a )
@torch.no_grad()
def __call__(self , __a , __a=1_60_00 , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ) -> List[str]:
UpperCamelCase = self.speech_processor.feature_extractor(
__a , return_tensors="pt" , sampling_rate=__a ).input_features.to(self.device )
UpperCamelCase = self.speech_model.generate(__a , max_length=48_00_00 )
UpperCamelCase = self.speech_processor.tokenizer.batch_decode(__a , skip_special_tokens=__a , normalize=__a )[
0
]
if isinstance(__a , __a ):
UpperCamelCase = 1
elif isinstance(__a , __a ):
UpperCamelCase = len(__a )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__a )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__a )}." )
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase = text_embeddings.shape
UpperCamelCase = text_embeddings.repeat(1 , __a , 1 )
UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase = 42
if negative_prompt is None:
UpperCamelCase = [""] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="
F" {type(__a )}." )
elif isinstance(__a , __a ):
UpperCamelCase = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
UpperCamelCase = negative_prompt
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=__a , truncation=__a , return_tensors="pt" , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = uncond_embeddings.shape[1]
UpperCamelCase = uncond_embeddings.repeat(1 , __a , 1 )
UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase = torch.randn(__a , generator=__a , device="cpu" , dtype=__a ).to(
self.device )
else:
UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
UpperCamelCase = 1 / 0.18215 * latents
UpperCamelCase = self.vae.decode(__a ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(__a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
| 544 | 1 |
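A minimal sketch of the classifier-free guidance step inside the denoising loop above: the UNet runs once on a doubled batch (unconditional + text-conditioned), and the two noise predictions are blended by `guidance_scale`.

import torch

def cfg_step(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] along the batch axis
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)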
import math
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
or isinstance(lowerCamelCase_ , lowerCamelCase_ )
or isinstance(lowerCamelCase_ , lowerCamelCase_ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
        raise ValueError('inputs must be non-negative.' )
if (
(math.floor(lowerCamelCase_ ) != input_a)
or (math.floor(lowerCamelCase_ ) != input_a)
or (math.floor(lowerCamelCase_ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('inputs must be less than or equal to 2.' )
# build registers
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.QuantumRegister(4 , 'qr' )
SCREAMING_SNAKE_CASE_ : Any = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
SCREAMING_SNAKE_CASE_ : Optional[int] = [input_a, input_a, carry_in]
SCREAMING_SNAKE_CASE_ : int = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCamelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCamelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCamelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCamelCase_ ) # measure the last two qbits
SCREAMING_SNAKE_CASE_ : Any = qiskit.Aer.get_backend('aer_simulator' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 105 |
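A classical cross-check for the quantum full adder above: for definite 0/1 inputs, the two measured bits should match the sum and carry-out of an ordinary full adder (how the bitstring maps to (sum, carry) depends on the measurement ordering).

def classical_full_adder(a: int, b: int, carry_in: int) -> tuple[int, int]:
    total = a + b + carry_in
    return total % 2, total // 2   # (sum bit, carry-out bit)

assert classical_full_adder(1, 1, 1) == (1, 1)   # 1+1+1 = 0b11 -> sum 1, carry 1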
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
_A : Union[str, Any] = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = "roberta-prelayernorm"
def __init__( self : Dict , A : Dict=5_0_2_6_5 , A : Tuple=7_6_8 , A : str=1_2 , A : Optional[Any]=1_2 , A : Union[str, Any]=3_0_7_2 , A : Dict="gelu" , A : Optional[int]=0.1 , A : Union[str, Any]=0.1 , A : str=5_1_2 , A : Any=2 , A : Optional[Any]=0.02 , A : str=1e-12 , A : Optional[Any]=1 , A : Dict=0 , A : Any=2 , A : Any="absolute" , A : str=True , A : List[str]=None , **A : Any , ) ->str:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : str = type_vocab_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : Any = position_embedding_type
lowerCamelCase__ : Union[str, Any] = use_cache
lowerCamelCase__ : Tuple = classifier_dropout
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
@property
def __lowerCamelCase ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 315 | 0 |
from __future__ import annotations
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = 2
UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_lowerCAmelCase )
if n > 1:
factors.append(_lowerCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 405 |
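Usage notes for the trial-division factorisation above: repeated factors appear once per multiplicity, and the loop performs at most O(sqrt(n)) divisions. The name `prime_factors` is an illustrative stand-in for the obfuscated function name.

import math

factors = prime_factors(5_040)
assert factors == [2, 2, 2, 2, 3, 3, 5, 7]
assert math.prod(factors) == 5_040   # the factorisation multiplies back to n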
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if index == r:
for j in range(_lowerCAmelCase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
UpperCAmelCase = arr[i]
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index + 1 , _lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 0 , _lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__lowerCAmelCase =[10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 405 | 1 |
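The recursive printer above enumerates r-combinations via the classic include/exclude recursion; the standard library yields the same sequence directly, in the same order.

from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)   # 10 20 30, 10 20 40, ... same order as the recursion above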
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__a = logging.getLogger(__name__)
__a = {'facebook/bart-base': BartForConditionalGeneration}
__a = {'facebook/bart-base': BartTokenizer}
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file", type=_snake_case, default=_snake_case, help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length", type=_snake_case, default=5, help="The maximum total input sequence length after tokenization.", )
parser.add_argument(
"--num_beams", type=_snake_case, default=_snake_case, help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
), )
parser.add_argument(
"--model_name_or_path", type=_snake_case, help="Path to pretrained model or model identifier from huggingface.co/models.", required=_snake_case, )
parser.add_argument(
"--config_name", type=_snake_case, default=_snake_case, help="Pretrained config name or path if not the same as model_name", )
parser.add_argument(
"--device", type=_snake_case, default="cpu", help="Device where the model will be run", )
parser.add_argument("--output_file_path", type=_snake_case, default=_snake_case, help="Where to store the final ONNX file." )
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
return args
def __UpperCAmelCase ( a_: Any, a_: Optional[int]="cpu" ):
_UpperCAmelCase : Tuple = model_dict[model_name].from_pretrained(_snake_case ).to(_snake_case )
_UpperCAmelCase : int = tokenizer_dict[model_name].from_pretrained(_snake_case )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def __UpperCAmelCase ( a_: List[Any], a_: Optional[Any], a_: List[str], a_: List[str], a_: Dict ):
model.eval()
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : List[str] = torch.jit.script(BARTBeamSearchGenerator(_snake_case ) )
with torch.no_grad():
_UpperCAmelCase : str = "My friends are cool but they eat too many carbs."
_UpperCAmelCase : int = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt" ).to(model.device )
_UpperCAmelCase : str = model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=_snake_case, max_length=_snake_case, early_stopping=_snake_case, decoder_start_token_id=model.config.decoder_start_token_id, )
torch.onnx.export(
_snake_case, (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
), _snake_case, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
}, example_outputs=_snake_case, )
logger.info("Model exported to {}".format(_snake_case ) )
_UpperCAmelCase : str = remove_dup_initializers(os.path.abspath(_snake_case ) )
logger.info("Deduplicated and optimized model written to {}".format(_snake_case ) )
_UpperCAmelCase : Optional[int] = onnxruntime.InferenceSession(_snake_case )
_UpperCAmelCase : List[Any] = ort_sess.run(
_snake_case, {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(_snake_case ),
"max_length": np.array(_snake_case ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
}, )
np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = parse_args()
_UpperCAmelCase : int = 5
_UpperCAmelCase : Optional[int] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase : Any = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase : Tuple = load_model_tokenizer(args.model_name_or_path, _snake_case )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(_snake_case )
if args.max_length:
_UpperCAmelCase : Union[str, Any] = args.max_length
if args.num_beams:
_UpperCAmelCase : List[str] = args.num_beams
if args.output_file_path:
_UpperCAmelCase : str = args.output_file_path
else:
_UpperCAmelCase : Optional[int] = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case )
if __name__ == "__main__":
    main()
| 494 |
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self :Dict, snake_case :str="sayef/fsner-bert-base-uncased"):
"""simple docstring"""
super(snake_case, self).__init__()
_lowercase =AutoModel.from_pretrained(snake_case, return_dict=snake_case)
_lowercase =torch.nn.CosineSimilarity(3, 1e-0_8)
_lowercase =torch.nn.Softmax(dim=1)
def UpperCamelCase__ ( self :str, **snake_case :int):
"""simple docstring"""
return self.bert(**snake_case).last_hidden_state
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Optional[Any]):
"""simple docstring"""
return token_embeddings.sum(2, keepdim=snake_case)
def UpperCamelCase__ ( self :List[Any], snake_case :int, snake_case :Dict, snake_case :Dict=1):
"""simple docstring"""
return self.softmax(T * self.cos(snake_case, snake_case))
def UpperCamelCase__ ( self :List[str], snake_case :int, snake_case :List[str]):
"""simple docstring"""
_lowercase =W_supports['sizes'].tolist()
_lowercase =W_supports['start_token_id'].item()
_lowercase =W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_lowercase =self.BERT(**snake_case)
_lowercase =self.BERT(**snake_case)
_lowercase =None
_lowercase =None
_lowercase =W_supports['input_ids'] == start_token_id
_lowercase =W_supports['input_ids'] == end_token_id
for i, size in enumerate(snake_case):
if i == 0:
_lowercase =0
else:
_lowercase =support_sizes[i - 1]
_lowercase =S[s : s + size][start_token_masks[s : s + size]]
_lowercase =S[s : s + size][end_token_masks[s : s + size]]
_lowercase =torch.matmul(q[i], s_start.T).sum(1).softmax(0)
_lowercase =torch.matmul(q[i], s_end.T).sum(1).softmax(0)
if p_starts is not None:
_lowercase =torch.vstack((p_starts, p_start))
_lowercase =torch.vstack((p_ends, p_end))
else:
_lowercase =p_start
_lowercase =p_end
return p_starts, p_ends
| 181 | 0 |
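A hedged sketch of the few-shot span scoring the model above computes per query: dot products between each query token and the support entities' start (or end) token embeddings are summed over the supports, temperature-scaled, and softmaxed over sequence positions. Names are illustrative.

import torch

def span_start_probs(query: torch.Tensor, support_starts: torch.Tensor, T: float = 1.0) -> torch.Tensor:
    # query: (seq_len, hidden); support_starts: (k, hidden) start-token embeddings
    scores = (query @ support_starts.T).sum(dim=1)   # aggregate over the k supports
    return torch.softmax(T * scores, dim=0)          # distribution over positions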
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
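A hedged, minimal stand-in for the `_LazyModule` pattern above using module-level `__getattr__` (PEP 562): heavy submodules are imported only on first attribute access. This sketch belongs in a package `__init__.py`; the structure dict mirrors the one above.

import importlib

_import_structure = {"modeling_pegasus_x": ["PegasusXModel"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")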
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __A( UpperCAmelCase ):
@staticmethod
@abstractmethod
def lowercase__ ( __UpperCamelCase : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def lowercase__ ( self : Any ):
raise NotImplementedError()
| 103 | 0 |
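A hedged sketch of how a concrete command plugs into the abstract interface above (the class name and subcommand are illustrative): `register_subcommand` attaches an argparse subparser plus a factory, and `run` does the work.

from argparse import ArgumentParser

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers) -> None:
        echo_parser = subparsers.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str) -> None:
        self.text = text

    def run(self) -> None:
        print(self.text)

cli = ArgumentParser("demo")
EchoCommand.register_subcommand(cli.add_subparsers())
args = cli.parse_args(["echo", "hello"])
args.func(args).run()   # prints "hello"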
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_A = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( __SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : List[str] = False
def A_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : List[Any] = False
def A_ ( ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__SCREAMING_SNAKE_CASE , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__SCREAMING_SNAKE_CASE , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__SCREAMING_SNAKE_CASE , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=__SCREAMING_SNAKE_CASE , help='''Temperature applied to the softmax in the distillation loss.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=__SCREAMING_SNAKE_CASE , help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__SCREAMING_SNAKE_CASE , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__SCREAMING_SNAKE_CASE , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__SCREAMING_SNAKE_CASE , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__SCREAMING_SNAKE_CASE , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__SCREAMING_SNAKE_CASE , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__SCREAMING_SNAKE_CASE , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__SCREAMING_SNAKE_CASE , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__SCREAMING_SNAKE_CASE , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__SCREAMING_SNAKE_CASE , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__SCREAMING_SNAKE_CASE , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__SCREAMING_SNAKE_CASE , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__SCREAMING_SNAKE_CASE , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__SCREAMING_SNAKE_CASE , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__SCREAMING_SNAKE_CASE , default=40_00 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
sanity_checks(__SCREAMING_SNAKE_CASE )
# ARGS #
init_gpu_params(__SCREAMING_SNAKE_CASE )
set_seed(__SCREAMING_SNAKE_CASE )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Any = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.all_special_tokens.index(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
__SCREAMING_SNAKE_CASE : int = special_tok_ids
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Dict = pickle.load(__SCREAMING_SNAKE_CASE )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Any = np.maximum(__SCREAMING_SNAKE_CASE , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Optional[int] = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : str = torch.from_numpy(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : str = LmSeqsDataset(params=__SCREAMING_SNAKE_CASE , data=__SCREAMING_SNAKE_CASE )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
__SCREAMING_SNAKE_CASE : Optional[int] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
__SCREAMING_SNAKE_CASE : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(__SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 158 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest ( unittest.TestCase ):
'''simple docstring'''
    def test_top_k_top_p_filtering( self ) -> List[str]:
        """simple docstring"""
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
            ], dtype=tf.float32, )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4 )
        non_inf_output = output[output != -float("""inf""" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("""inf""" ), dtype=tf.float32 ) ) ), dtype=tf.int32, )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx )
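        # sanity of the fixture above: with top_k=10, top_p=0.6 and min_tokens_to_keep=4, only the
        # five annotated logits per row survive the filter; every other position is set to -inf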
@require_tf
class TFGenerationIntegrationTests ( unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ) -> str:
        """simple docstring"""
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            '''simple docstring'''
            def __init__( self, model ) -> Any:
                """simple docstring"""
                super(DummyModel, self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="""input_ids""" ),
                    tf.TensorSpec((None, input_length), tf.int32, name="""attention_mask""" ),
                ), jit_compile=True, )
            def serving( self, input_ids, attention_mask ) -> int:
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"""serving_default""": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
            for batch_size in range(1, len(dummy_input_ids ) + 1 ):
                inputs = {
                    """input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
                    """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["""sequences"""]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ) -> Tuple:
        """simple docstring"""
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            '''simple docstring'''
            def __init__( self, model ) -> List[str]:
                """simple docstring"""
                super(DummyModel, self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="""input_ids""" ),
                    tf.TensorSpec((batch_size, None), tf.int32, name="""attention_mask""" ),
                ), jit_compile=True, )
            def serving( self, input_ids, attention_mask ) -> Optional[Any]:
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"""serving_default""": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    """input_ids""": tf.constant([dummy_input_ids[input_row]] ),
                    """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["""sequences"""]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer( self ) -> Any:
        """simple docstring"""
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="""google/flan-t5-small""", filename="""spiece.model""", local_dir=tmp_dir )
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                '''simple docstring'''
                def __init__( self ) -> Optional[Any]:
                    """simple docstring"""
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, """spiece.model""" ), """rb""" ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
                def call( self, inputs, *args, **kwargs ) -> Optional[Any]:
                    """simple docstring"""
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="""inputs""" )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs, outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ) -> Optional[int]:
        """simple docstring"""
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            """do_sample""": True,
            """num_beams""": 1,
            """top_p""": 0.7,
            """top_k""": 10,
            """temperature""": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        sentence = """Hello, my dog is cute and"""
        tokens = tokenizer(sentence, return_tensors="""tf""" )
        model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(""":/CPU:0""" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(""":/CPU:0""" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering( self ) -> str:
        """simple docstring"""
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        article = """Hugging Face is a technology company based in New York and Paris."""
        input_ids = bart_tokenizer(article, return_tensors="""tf""" ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart(TFBartForConditionalGeneration ):
            '''simple docstring'''
            def call( self, input_ids, foo=None, **kwargs ) -> str:
                """simple docstring"""
                return super().call(input_ids, **kwargs )
        bart_model = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        fake_output = bart_model.generate(input_ids, foo="""bar""" ).numpy()
        self.assertTrue(np.array_equal(output, fake_output ) )
        class FakeEncoder(bart_model.model.encoder.__class__ ):
            '''simple docstring'''
            def call( self, input_ids, **kwargs ) -> List[str]:
                """simple docstring"""
                return super().call(input_ids, **kwargs )
        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="""bar""" )
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
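# e.g. "backbone.0.body.layer1.0.conv1.weight" -> "backbone.conv_encoder.model.layer1.0.conv1.weight"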
def read_in_q_k_v(state_dict ):
    '''simple docstring'''
    prefix = ""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:2_56, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:2_56]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[2_56:5_12, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[2_56:5_12]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-2_56:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-2_56:]
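# Note on the slicing above: PyTorch's fused MultiheadAttention stacks q, k and v along dim 0 of a
# single in_proj matrix, so with the 256-wide hidden size implied by these slices, in_proj_weight
# has shape (3 * 256, 256): rows [:256] are the query weights, [256:512] the keys, [-256:] the values.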
def resize(image , checkpoint_url ):
    '''simple docstring'''
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 8_00 if "detection" in checkpoint_url else 10_00
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
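# e.g. for a detection checkpoint (target_max_size = 800), a 1200x800 image has longest edge 1200,
# so scale = 800 / 1200 and the image is resized to 800x533, preserving the aspect ratio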
def normalize(image ):
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
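    # e.g. "encoder.layernorm.weight" becomes "model.encoder.layernorm.weight", while the two head
    # key families ("class_labels_classifier.*", "bbox_predictor.*") keep their original names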
# create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 1_25
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection" , max_size=8_00 if "detection" in checkpoint_url else 10_00 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=filename )
    image = Image.open(file_path ).convert("RGB" )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
lowercase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
lowercase__ = (1, 1_25, 7)
lowercase__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
lowercase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 706 | """simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 0 |
def bead_sort(sequence ):
    """simple docstring"""
    if any(not isinstance(x ,int ) or x < 0 for x in sequence ):
        raise TypeError('Sequence must be list of non-negative integers' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence ,sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
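# One "gravity" pass on [5, 3]: rod_upper (5) > rod_lower (3), so 2 beads fall and the pair becomes [3, 5]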
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 605 |
def merge_sort(collection ):
    """simple docstring"""
    def merge(left ,right ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
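# e.g. merge_sort([3, 1, 2]): mid = 1, so the call becomes merge(merge_sort([3]), merge_sort([1, 2])) -> [1, 2, 3]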
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Any = input('Enter numbers separated by a comma:\n').strip()
snake_case : List[Any] = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 605 | 1 |
'''simple docstring'''
def sum_of_proper_divisors(input_num: int ) -> int:
    if not isinstance(input_num ,int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
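# e.g. input_num = 6: the proper divisors are 1, 2 and 3, so the function returns 6 (6 is a perfect number)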
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
"""simple docstring"""
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ) -> Any:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ) -> Union[str, Any]:
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model( self, config, pixel_values, labels ) -> Optional[Any]:
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self, config, pixel_values, labels ) -> Any:
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self, config, pixel_values, labels ) -> Tuple:
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> str:
pass
    def test_model_common_attributes( self ) -> List[str]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense ) )
    def test_forward_signature( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ) -> List[str]:
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Tuple:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ) -> Any:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> Any:
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 238 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = FalconModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = FalconModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FalconModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_alibi( self ):
        """simple docstring"""
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
    def test_falcon_sequence_classification_model( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_falcon_sequence_classification_model_for_single_label( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_falcon_rw_cache_conversion( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['''input_ids''']
        model = FalconForCausalLM(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , use_cache=True )
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values )
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size )
        for layer in range(len(rw_cache ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
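        # note: the legacy RW cache keeps each key/value tensor 3-D by fusing batch and heads
        # (batch_size * num_heads, seq_len, head_dim), while the standard format is 4-D, hence
        # the ndim checks above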
    def test_falcon_sequence_classification_model_for_multi_label( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_past_key_values_format( self ):
        """simple docstring"""
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , '''use_cache''' ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs['''use_cache'''] = True
            outputs = model(**inputs )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config , '''decoder_layers''' , None )
                or getattr(config , '''num_decoder_layers''' , None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , '''num_kv_heads''' , config.num_attention_heads )
            embed_dim = getattr(config , '''d_model''' , config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['''past_key_values''']
            self.assertEqual(len(past_kv ) , num_hidden_layers )
            batch_size, seq_length = inputs['''input_ids'''].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
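        # e.g. with multi_query=True a single key/value head is shared across all query heads,
        # which is why the expected cache shape above collapses num_attention_heads to 1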
@require_torch
class FalconLanguageGenerationTest ( unittest.TestCase ):
@slow
    def test_lm_generate_falcon( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
        model = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
        model.eval()
        model.to(torch_device )
        inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
        EXPECTED_OUTPUT = (
            '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19 )
        output_str = tokenizer.batch_decode(output_ids )[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT )
@slow
    def test_lm_generation_big_models( self ):
        """simple docstring"""
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo )
            model = FalconForCausalLM.from_pretrained(repo )
            model.eval()
            model.to(torch_device )
            inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4 )
            model.generate(**inputs , do_sample=True , max_new_tokens=4 )
            model.generate(**inputs , num_beams=2 , max_new_tokens=4 )
@slow
    def test_lm_generation_use_cache( self ):
        """simple docstring"""
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo )
                model = FalconForCausalLM.from_pretrained(repo )
                model.eval()
                model.to(device=torch_device )
                inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False )
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 202 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 471 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""Processor wrapping a SAM image processor and handling point/box/label prompts."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10  # sentinel used to pad ragged point prompts
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None,
                 return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        """Preprocess images and rescale point/box prompts to the model's input frame."""
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points,
            input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes,
                               input_points=None, input_labels=None, input_boxes=None,
                               return_tensors="pt"):
        """Rescale prompts to the resized image frame and convert them to framework tensors."""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # a single original size is broadcast over all point prompts
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad ragged point prompts (and their labels) to a common length with the pad sentinel."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False):
        """Rescale coordinates from the original image frame to the preprocessed frame."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """Validate raw prompts and convert them to lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
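# Minimal usage sketch for the processor above, kept as a comment since this
# module uses relative imports. The checkpoint name and point coordinates are
# illustrative assumptions, not taken from this file:
#
#     from transformers import SamProcessor
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
#     # "input_points" is now rescaled to the longest-edge frame computed in
#     # _normalize_coordinates above.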
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
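# With the registry above, importing the package stays cheap: _LazyModule only
# resolves a submodule on first attribute access. A quick sketch of the effect
# (assumes torch is installed so the modeling entries were registered):
#
#     from transformers.models.roformer import RoFormerModel  # triggers modeling_roformer import here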
| 689 | 1 |
# Functions to print a star diamond (two stacked pyramids)
def floyd(n):
    """Print the upper half of the diamond: rows of 1..n stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: rows of n..1 stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
lowerCAmelCase : str = 1
while K:
lowerCAmelCase : Any = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
lowerCAmelCase : Optional[Any] = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
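# For reference, the shape produced by the functions above for a small input
# (hand-traced, so treat it as illustrative):
#
#     pretty_print(3)
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *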
| 511 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector; k is the empirical sensitivity constant (0.04-0.06)."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured sensitivity rather than a hard-coded constant
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase : List[Any] = HarrisCorner(0.04, 3)
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
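# Recap of the response computed in detect() above: with the windowed structure
# tensor M = [[wxx, wxy], [wxy, wyy]], the Harris measure is
# R = det(M) - k * trace(M)**2; corners yield large positive R, edges negative R,
# and flat regions R near zero. Hand-checked toy values (illustrative only):
wxx_demo, wyy_demo, wxy_demo, k_demo = 10.0, 8.0, 1.0, 0.04
r_demo = (wxx_demo * wyy_demo - wxy_demo**2) - k_demo * (wxx_demo + wyy_demo) ** 2
assert round(r_demo, 2) == 66.04  # comfortably above the 0.5 threshold used above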
| 511 | 1 |
"""Project Euler problem 11: greatest product of four adjacent numbers in a grid."""
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers (up/down, left/right, diagonal)."""
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """Read the grid from grid.txt next to this file and solve the problem."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
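# Hand-checked toy case for largest_product (illustrative): in a square 4x4
# grid, the row of 2s gives the best run of four adjacent numbers, 2**4 == 16.
assert largest_product([[1, 1, 1, 1], [2, 2, 2, 2], [1, 1, 1, 1], [1, 1, 1, 1]]) == 16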
| 710 |
"""Processor pairing an Encodec feature extractor with a T5 tokenizer (Musicgen-style)."""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    # class name inferred from the Encodec + T5 pairing below; the original snippet obfuscated it
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and `text` to the tokenizer, merging the outputs."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Decode batched audio outputs via `_decode_audio`, or token ids via the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strip padding from generated audio, returning one array per batch element."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
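# Minimal usage sketch, kept as a comment since this module uses relative
# imports. The MusicGen checkpoint name is an assumption based on the
# Encodec + T5 pairing above:
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")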
| 532 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
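# To run just these tests locally (the file path is an assumption based on the
# usual transformers test layout; the slow/integration classes additionally
# need RUN_SLOW=1):
#
#     RUN_SLOW=1 python -m pytest tests/models/opt/test_modeling_tf_opt.py -k TFOPT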
| 23 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    r"""Composite configuration holding an arbitrary vision encoder config and a text decoder config."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
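# Sketch of pairing configs with the class above, kept as a comment because this
# module uses relative imports. ViT and GPT-2 are arbitrary example choices:
#
#     from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig
#
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention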
| 23 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    r"""Composite configuration for a generic encoder-decoder model."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
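# Minimal round-trip sketch, kept as a comment since this module uses relative
# imports. BERT is an arbitrary choice of encoder and decoder:
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention
#     assert config.to_dict()["model_type"] == "encoder-decoder"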
| 712 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, following the replacement pattern for its kind."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version pinned in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded in the repo."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model-doc links in the README at the stable docs rather than main."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the __init__ of the library."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__SCREAMING_SNAKE_CASE : Optional[int] =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
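# Typical invocations for this script (its repo path is an assumption based on
# similar transformers utilities):
#
#     python utils/release.py                 # bump to the next minor release
#     python utils/release.py --patch         # cut a patch release
#     python utils/release.py --post_release  # move back to a .dev0 version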
| 72 | 0 |
"""Check whether two integers have opposite signs using the XOR sign bit."""


def different_signs(num1: int, num2: int) -> bool:
    # x ^ y is negative exactly when the sign bits of x and y differ
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
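# Quick sanity checks for the sign trick above (hand-verified):
assert different_signs(1, -1) is True
assert different_signs(-3, -7) is False
assert different_signs(0, 5) is False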
| 41 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ : Any = """pt"""
elif is_tf_available():
__magic_name__ : Optional[Any] = """tf"""
else:
__magic_name__ : int = """jax"""
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Perceiver works on raw bytes, so not every id decodes on its own; skip
        # undecodable ids instead of keeping a stale `tok` value.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
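    # Note on the byte-level ids asserted in test_multibytes_char above: Perceiver
    # tokenizes raw UTF-8 bytes with a fixed offset for its special tokens, so the
    # euro sign (three bytes) becomes three ids. Hand-checked, with the offset of 6
    # inferred from the [CLS]=4 / [SEP]=5 ids in the expectations:
    #
    #     [b + 6 for b in "€".encode("utf-8")] == [232, 136, 178]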
| 102 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 714 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of models that support `task_guide`."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally overwrite) the autogenerated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_SOURCE, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_SOURCE, task_guide), 'w', encoding='utf-8', newline='\n') as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
''' to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 374 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
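# Note: the _LazyModule registered above defers importing torch-heavy submodules
# until an attribute such as MMBTModel is first accessed.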
| 314 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 314 | 1 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed `n`."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F"{solution() = }")
| 705 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
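# Illustrative negative case (added for contrast): a triangle (odd cycle) cannot
# be two-colored, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False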
| 430 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, 'torch.Tensor', Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
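# Illustrative note (not in the original module): this formatter is what backs
# `Dataset.set_format("torch")` in `datasets`, e.g.
#   ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#   ds.set_format("torch")
#   ds[0]["x"]  # -> torch.Tensor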
| 620 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of `n` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
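    # doctest has nothing to check above, so add an explicit spot check:
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]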
| 620 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 720 | '''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the digit string `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 415 | 0 |
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 457 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the given shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
return values
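# e.g. floats_list((2, 3)) yields a 2x3 nested list of random floats in [0, 1).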
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :24], expected, atol=1e-4))
| 457 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 711 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
)
else:
total_time += 1
return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 136 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
| 690 | '''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively; results are memoised via lru_cache."""
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
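    # no doctests are defined above, so exercise the function directly:
    assert factorial(5) == 120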
| 370 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
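# A minimal illustrative subclass (hypothetical; real commands live in
# transformers/commands/*). In practice `register_subcommand` receives an
# argparse sub-parsers object, whose `add_parser` is used below.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello")  # hypothetical subcommand
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")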
| 462 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = 'realm'
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
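# Illustrative usage (assumes transformers is installed):
#   config = RealmConfig()
#   config.num_candidates  # -> 8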
| 462 | 1 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
_A = """abc1abc12"""
_A = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_A = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_A = """ABABX"""
_A = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
_A = """AAAB"""
_A = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
_A = """abcdabcy"""
_A = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
_A = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 158 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    model_type = 'swinv2'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list)}, but is'''
                    f''' {type(additional_special_tokens)}''')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f''' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}''')

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 409 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]

    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 409 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
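# Note: boxes are normalized to a 0-1000 grid, the coordinate convention that
# LayoutLM-style models expect regardless of the source image resolution.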
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 206 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = pipe.tokenizer(
            prompt, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors='np', )
        text_inputs = text_inputs['input_ids']

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1E-4
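    # Added note: the equivalence asserted above depends on reproducing the
    # pipeline's own tokenization exactly (max_length padding + truncation,
    # int32 ids). Dynamic padding, for instance, would change the embeddings
    # and push the slice difference past the 1e-4 tolerance.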
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors='np', )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        number_of_steps = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
        test_callback_fn.has_been_called = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
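# Added worked example (not in the original file): with the default
# scale_factor=8 this maps the requested pixel size to the latent-grid size
# handed to prepare_latents, rounding non-multiples of scale_factor**2 up:
#
#   >>> downscale_height_and_width(768, 768)
#   (96, 96)
#   >>> downscale_height_and_width(770, 768)
#   (104, 96)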
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : Any , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : torch.FloatTensor , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 100 , lowercase_ : float = 4.0 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
snake_case_ : List[str] = self._execution_device
snake_case_ : str = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : Any = torch.cat(lowercase_ , dim=0 )
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : Union[str, Any] = torch.cat(lowercase_ , dim=0 )
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : Union[str, Any] = torch.cat(lowercase_ , dim=0 )
snake_case_ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case_ : List[Any] = image_embeds.repeat_interleave(lowercase_ , dim=0 )
snake_case_ : Tuple = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
snake_case_ : str = hint.repeat_interleave(lowercase_ , dim=0 )
snake_case_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
snake_case_ : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
snake_case_ : Optional[Any] = self.scheduler.timesteps
snake_case_ : int = self.movq.config.latent_channels
snake_case_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
snake_case_ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : str = {'''image_embeds''': image_embeds, '''hint''': hint}
snake_case_ : List[str] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
snake_case_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
snake_case_ : Union[str, Any] = noise_pred.chunk(2 )
snake_case_ : List[str] = variance_pred.chunk(2 )
snake_case_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : str = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
snake_case_ : Union[str, Any] = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case_ : Optional[int] = image * 0.5 + 0.5
snake_case_ : Optional[Any] = image.clamp(0 , 1 )
snake_case_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 711 |
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max rescaling of `data` into [0, 1], rounded to `ndigits`."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Z-score standardization (zero mean, unit sample stdev), rounded to `ndigits`."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
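

if __name__ == "__main__":
    # Added demo: quick sanity check of both rescalings.
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))  # [0.0, 0.333, 0.667, 1.0] -> min-max scaled into [0, 1]
    print(standardization(sample))  # zero mean, unit (sample) standard deviation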
| 485 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    """Bad-character heuristic of the Boyer-Moore string search."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Index of the first mismatch scanning right to left, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = """ABAABA"""
pattern = """AB"""

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
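# Added check: "AB" occurs in "ABAABA" at indices 0 and 3.
assert positions == [0, 3]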
| 23 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar('KT')
__lowerCAmelCase = TypeVar('VT')
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None) -> None:
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"""Node({self.key}: {self.value})"""

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"""SkipList(level={self.level})"""

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size, """-""") + """* """ * len(forwards))
        lines.append(""" """ * label_size + """| """ * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size, """-""")
                + """ """.join(str(n.key) if n.key == node.key else """|""" for n in forwards))
            lines.append(""" """ * label_size + """| """ * len(forwards))
            forwards = node.forward

        lines.append("""None""".ljust(label_size) + """* """ * len(forwards))
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
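# Added note (not in the original file): with p=0.5 each key's tower height is
# geometric, so a node carries 1/(1-p) = 2 forward pointers on average and
# search/insert/delete take O(log n) expected time. Quick empirical check:
#
#   sl = SkipList()
#   for i in range(1000):
#       sl.insert(i, i)
#   print(sl.level)  # typically around log2(1000) ~ 10, capped by max_level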
def test_insert():
    skip_list = SkipList()
    skip_list.insert("""Key1""", 3)
    skip_list.insert("""Key2""", 12)
    skip_list.insert("""Key3""", 41)
    skip_list.insert("""Key4""", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("""Key1""", 10)
    skip_list.insert("""Key1""", 12)

    skip_list.insert("""Key5""", 7)
    skip_list.insert("""Key7""", 10)
    skip_list.insert("""Key10""", 5)

    skip_list.insert("""Key7""", 7)
    skip_list.insert("""Key5""", 5)
    skip_list.insert("""Key10""", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("""Some key""") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("""Key2""", 20)
    assert skip_list.find("""Key2""") == 20

    skip_list.insert("""Some Key""", 10)
    skip_list.insert("""Key2""", 8)
    skip_list.insert("""V""", 13)

    assert skip_list.find("""Y""") is None
    assert skip_list.find("""Key2""") == 8
    assert skip_list.find("""Some Key""") == 10
    assert skip_list.find("""V""") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("""Some key""")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 14)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""V""")
    skip_list.delete("""Key2""")

    assert skip_list.find("""V""") is None
    assert skip_list.find("""Key2""") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 14)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""V""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") == 14
    assert skip_list.find("""Key1""") == 12
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""X""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") == 12
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""Key1""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") is None
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""Key2""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") is None
    assert skip_list.find("""Key2""") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 142)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""X""")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, """2""")
    skip_list.insert(4, """4""")
    skip_list.insert(6, """4""")
    skip_list.insert(4, """5""")
    skip_list.insert(8, """4""")
    skip_list.insert(9, """4""")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 585 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_efficientnet""": [
        """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EfficientNetConfig""",
        """EfficientNetOnnxConfig""",
    ]
}
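# Added note: `_import_structure` maps submodule name -> exported symbol names.
# The `_LazyModule` constructed at the bottom of this file uses it to defer the
# heavy torch/vision imports until an attribute is first touched, roughly:
#
#   module = _LazyModule(__name__, globals()["__file__"], _import_structure)
#   module.EfficientNetModel  # only now does modeling_efficientnet get imported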
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EfficientNetForImageClassification""",
        """EfficientNetModel""",
        """EfficientNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 701 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 306 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Fingerprint a module's source, ignoring comments and blank lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
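# Added example: comment-only and blank lines do not affect the fingerprint,
# so cosmetic edits to a builder module keep the cache key stable.
#
#   >>> _hash_python_lines(["x = 1", "# a comment", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])
#   True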
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
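# Added example: this table is how `load_dataset` picks a builder from file
# extensions when no explicit format is given, e.g.:
#
#   >>> _EXTENSION_TO_MODULE[".jsonl"]
#   ('json', {})
#   >>> _EXTENSION_TO_MODULE[".tsv"]
#   ('csv', {'sep': '\t'})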
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip') | 44 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 493 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 220 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
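    # Added note: `KwargsHandler.to_kwargs()` (exercised above) diffs an instance
    # against its dataclass defaults so only explicitly-changed fields are
    # forwarded to torch. A minimal re-implementation of the idea:
    #
    #   from dataclasses import fields
    #
    #   def to_kwargs(obj):
    #       defaults = type(obj)()  # assumes all fields have defaults
    #       return {f.name: getattr(obj, f.name) for f in fields(obj)
    #               if getattr(obj, f.name) != getattr(defaults, f.name)}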
| 220 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__snake_case =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
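# Added usage sketch for the pipeline above (the checkpoint name is the usual
# CLAP example and is an assumption, not taken from this file):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav",
#              candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> [{"score": ..., "label": "Sound of a dog"}, ...] sorted by score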
| 133 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__snake_case ="""http://www.mocksite.com/file1.txt"""
__snake_case ="""\"text\": [\"foo\", \"foo\"]"""
__snake_case ="""6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class UpperCAmelCase_ :
lowerCamelCase : Dict = 200
lowerCamelCase : Optional[Any] = {'''Content-Length''': '''100'''}
lowerCamelCase : List[str] = {}
def __UpperCAmelCase ( self : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
return [bytes(UpperCAmelCase__ , 'utf-8' )]
def a_ ( *lowerCamelCase : Optional[Any] , **lowerCamelCase : Optional[Any] ):
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def a_ ( lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] ):
import requests
monkeypatch.setattr(lowerCamelCase , 'request' , lowerCamelCase )
lowerCAmelCase = URL
if issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = url
elif issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = [url]
elif issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = {'train': url}
lowerCAmelCase = 'dummy'
lowerCAmelCase = 'downloads'
lowerCAmelCase = tmp_path
lowerCAmelCase = DownloadConfig(
cache_dir=os.path.join(lowerCamelCase , lowerCamelCase ) , use_etag=lowerCamelCase , )
lowerCAmelCase = DownloadManager(dataset_name=lowerCamelCase , download_config=lowerCamelCase )
lowerCAmelCase = dl_manager.download(lowerCamelCase )
lowerCAmelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = [downloaded_paths]
lowerCAmelCase = [urls]
elif isinstance(lowerCamelCase , lowerCamelCase ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase = downloaded_paths.values()
lowerCAmelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCamelCase , lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase = Path(lowerCamelCase )
lowerCAmelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
lowerCAmelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : str ):
lowerCAmelCase = str(lowerCamelCase )
if issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = filename
elif issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = [filename]
elif issubclass(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = {'train': filename}
lowerCAmelCase = 'dummy'
lowerCAmelCase = xz_file.parent
lowerCAmelCase = 'extracted'
lowerCAmelCase = DownloadConfig(
cache_dir=lowerCamelCase , use_etag=lowerCamelCase , )
lowerCAmelCase = DownloadManager(dataset_name=lowerCamelCase , download_config=lowerCamelCase )
lowerCAmelCase = dl_manager.extract(lowerCamelCase )
lowerCAmelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = [extracted_paths]
lowerCAmelCase = [paths]
elif isinstance(lowerCamelCase , lowerCamelCase ):
assert "train" in extracted_paths.keys()
lowerCAmelCase = extracted_paths.values()
lowerCAmelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCamelCase , lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase = Path(lowerCamelCase )
lowerCAmelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCamelCase , etag=lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase = extracted_path.read_text()
lowerCAmelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple ):
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCamelCase , start=1 ):
lowerCAmelCase = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] ):
lowerCAmelCase = request.getfixturevalue(lowerCamelCase )
lowerCAmelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase ) , start=1 ):
_test_jsonl(lowerCamelCase , lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ):
lowerCAmelCase = request.getfixturevalue(lowerCamelCase )
lowerCAmelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCamelCase ) , start=1 ):
_test_jsonl(lowerCamelCase , lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def a_ ( lowerCamelCase : List[Any] ):
lowerCAmelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCamelCase ) , start=1 ):
assert os.path.basename(lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 133 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    """Rewrite the keys of a fairseq vocab dict into the format the FSMT tokenizer expects."""
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
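

# Worked example of what `rewrite_dict_keys` does (illustrative values):
#   {"le@@": 5, "tt@@": 6, "er": 7}  ->  {"le": 5, "tt": 6, "er</w>": 7}
# i.e. the BPE continuation marker "@@" is stripped and word-final pieces gain "</w>",
# while the special tokens keep their original form.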
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 232 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to exactly `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
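

# Hand-checked examples (illustrative): with arr = [3, 34, 4, 12, 5, 2], a sum of 9 is
# reachable (4 + 5) while 30 is not -- every subset without 34 sums to at most 26, and 34
# alone already overshoots:
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)   # -> True
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # -> False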
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232 | 1 |
"""
Longest common subsequence (LCS): given two sequences, find the length of the longest
subsequence present in both of them. A subsequence appears in the same relative order
but is not necessarily contiguous.
"""


def longest_common_subsequence(x: str, y: str):
    """Return a tuple (length_of_lcs, one_lcs_string) for strings `x` and `y`."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
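

# Hand-checked example (illustrative): "gaming" is itself a subsequence of "programming"
# (g-a-m-i-n-g appear in order), so it is the unique LCS of the pair:
#   longest_common_subsequence("programming", "gaming")  # -> (6, "gaming")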
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
| 507 |
"""Utilities used across Accelerate: model unwrapping, main-process saving, and environment patching."""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
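

# Typical use of `extract_model_from_parallel` (sketch): unwrap a DDP/DataParallel-wrapped
# model before saving, so checkpoint keys are not prefixed with "module.":
#   unwrapped = extract_model_from_parallel(wrapped_model)
#   torch.save(unwrapped.state_dict(), "pytorch_model.bin")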
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, writing only from the main process (or via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys upper-cased) and remove them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
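

# Usage sketch for `patch_environment`: keys are upper-cased on entry and removed on exit.
#   with patch_environment(cuda_visible_devices="0"):
#       assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
#   # outside the block the variable is gone again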
def get_pretty_name(obj):
    """Return a readable name for `obj`: its qualified name, plain name, or `str()` fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`: nested dicts are merged, other values overwritten."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
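

# Worked example for `merge_dicts` (illustrative): nested keys are merged, not overwritten.
#   merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}})  # -> {"a": {"b": 1, "c": 2}}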
def is_port_in_use(port: int = None) -> bool:
    """Check whether `port` (default 29500, the torch.distributed default) is already in use."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 507 | 1 |
"""Helpers to fetch the artifacts produced by the last scheduled (daily) CI workflow run."""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed workflow run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword name defined by `get_artifacts_links` upstream
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the requested artifacts from the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
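

# Illustrative driver (not part of the original module; the artifact name and the
# GITHUB_TOKEN environment variable are assumptions for this sketch):
if __name__ == "__main__":
    token = os.environ.get("GITHUB_TOKEN")
    reports = get_last_daily_ci_reports(["run_all_tests_gpu_test_reports"], output_dir=".", token=token)
    print(sorted(reports.keys()))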
| 704 |
"""Configuration class for composite encoder-decoder models."""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration for an encoder-decoder model composed of two `PretrainedConfig` instances."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Instantiate an `EncoderDecoderConfig` from an encoder and a (cross-attention) decoder config."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance, including the nested encoder and decoder configs, to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
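

# Minimal usage sketch (any encoder/decoder config pair works; "bert-base-uncased" is a placeholder):
#   from transformers import AutoConfig
#   encoder = AutoConfig.from_pretrained("bert-base-uncased")
#   decoder = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention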
| 420 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask = config_and_inputs[:6]
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask = config_and_inputs[:6]
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels) = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
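

# Running this file's tests, including the @slow integration test above (sketch; the path
# follows the usual transformers test layout and is an assumption):
#   RUN_SLOW=1 python -m pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py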
| 227 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
snake_case__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
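

# Shape sketch for the collator above: a batch of B examples with 4 choices of length L
# arrives as B lists of 4 encoded choices, is flattened to (B * 4, L) so `tokenizer.pad`
# sees a flat batch, then reshaped back to (B, 4, L); labels become a (B,) int64 tensor.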
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
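

# Example launch (sketch; the model name and output path are placeholders):
#   python run_swag.py --model_name_or_path bert-base-uncased --output_dir /tmp/swag_out \
#       --do_train --do_eval --per_device_train_batch_size 16 --learning_rate 5e-5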
| 583 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
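
# Effect of the `_LazyModule` indirection (sketch): `from transformers.models.mt5 import MT5Model`
# only triggers the import of `modeling_mt5` on first attribute access, so a plain
# `import transformers` does not pay for loading the PyTorch/TF/Flax model code up front.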
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")

        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"]) | 484 | 1 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (modifies `img` in place)
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
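# For reference, the same inversion can be done without explicit Python loops.
# A minimal sketch, assuming an 8-bit image as returned by imread (values 0-255);
# the variable name below is illustrative, not part of the original script:
#
#     negative = 255 - img  # vectorized uint8 arithmetic over all pixels and channels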
| 468 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 468 | 1 |
# Eulerian Path is a path in a graph that visits every edge exactly once.
# Eulerian Circuit is an Eulerian Path that starts and ends on the same vertex.


# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
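# Quick sanity check (hypothetical extra example, not part of the original module):
# a square with one diagonal has exactly two odd-degree vertices (1 and 3), so it
# admits an Euler path but not an Euler cycle.
if __name__ == "__main__":
    square_with_diagonal = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2, 4], 4: [1, 3]}
    check_euler(square_with_diagonal, 10)  # prints "graph has a Euler path" plus one traversal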
| 329 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
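# For instance, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.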
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 329 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
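# Note: patching "requests.Session.request" to return a canned 500 response, as done
# above, is a generic way to simulate a network outage — every hub request issued
# inside the context manager sees the fake response instead of hitting the network.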
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 349 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass | 46 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
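# Illustrative note: with this structure, `import transformers.models.biogpt` stays cheap;
# the torch-backed classes listed above are only really imported on first attribute
# access, via the _LazyModule installed at the bottom of this file.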
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 178 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None) | 178 | 1 |
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """A class representing an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximum saving achievable by replacing the network with its MST."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total
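# Minimal sketch with hypothetical in-memory data (not from p107_network.txt):
# a triangle with one heavy edge; the MST keeps the two light edges, saving 5.
if __name__ == "__main__":
    demo = Graph({0, 1, 2}, {(0, 1): 2, (1, 2): 3, (0, 2): 5})
    mst = demo.prims_algorithm()
    assert sum(demo.edges.values()) - sum(mst.edges.values()) == 5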
if __name__ == "__main__":
    print(f"{solution() = }")
| 13 |
def binary_and(a: int, b: int) -> str:
    """
    Take in two integers, return the bitwise AND of both as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 141 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Multiple `ControlNetModel` wrapper class for Multi-ControlNet."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale,
                class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
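# Hypothetical usage sketch (repo ids and tensors below are illustrative, not from this file):
#
#     controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     controlnet_depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
#     multi = MultiControlNetModel([controlnet_pose, controlnet_depth])
#     down_res, mid_res = multi(
#         sample, timestep, encoder_hidden_states,
#         controlnet_cond=[pose_image, depth_image],
#         conditioning_scale=[1.0, 0.5],  # weight each ControlNet's residuals independently
#     )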
| 548 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 548 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 146 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
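# Example (hypothetical version string): an example script calls
#     check_min_version("4.21.0.dev0")
# at import time and fails fast if the installed transformers is older.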
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
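# enable_full_determinism() seeds the Python/NumPy/torch RNGs and forces deterministic CUDA
# kernels so the pixel-slice assertions below are reproducible across runs.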
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
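    # Smoke test: run the tiny pipeline end to end on CPU and compare a 3x3 corner of the
    # generated image against reference values.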
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # skipped for now, as optional-component save/load needs dedicated refiner tests
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
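# The tests below download the real checkpoints, so they only run in the slow suite on a GPU machine.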
@slow
@require_torch_gpu
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
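    # Helpers that write a real (tiny) checkpoint to disk so determine_framework can
    # inspect local files rather than hub identifiers.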
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel values and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
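# The tester above builds deliberately tiny configs; the test suites below reuse it so the
# common model tests stay fast.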
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMSN does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a VAN (Visual Attention Network) model.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
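# Minimal usage sketch (assumes a transformers version that still ships the VAN model
# classes; the names below are illustrative, not part of this file):
#     config = VanConfig(hidden_sizes=[64, 128, 320, 512])
#     model = VanModel(config)  # randomly initialized backbone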
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
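# Prints a list of (filled_sentence, probability, predicted_token) triples for the top-3
# candidates, e.g. ("Le camembert est délicieux :)", ...).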
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """
    A Node has a value variable and pointers to Nodes to its left and right.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """
    Sums all node values in a binary tree via a recursive depth-first search.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
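# Example with a hypothetical three-node tree: 10 + 5 + (-3) == 12.
#     tree = Node(10)
#     tree.left, tree.right = Node(5), Node(-3)
#     assert list(BinaryTreeNodeSum(tree)) == [12]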
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    # Disable gradient tracking for every parameter in the module.
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device
def show_pil(img):
    # Display the image without axis ticks.
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        # Note: `resize` in the return statement is the functional transform imported above.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Center crop to (size["height"], size["width"]).
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Rescale pixel values by `scale`, typically 1/255.
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Normalize with the given per-channel mean and standard deviation.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
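# Usage sketch (hypothetical local image; shapes assume the default 224x224 crop):
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])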
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
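# Brute force: for every ordered pair of fractions x = x_num/x_den and y = y_num/y_den with
# denominators up to `order`, solve for the third fraction z at each exponent n in
# {1, 2, -1, -2}; `Fraction` keeps the final sum exact.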
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2] and swap them according to `direction`
    (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of the given length into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `array[low : low + length]` in the given direction by first building a
    bitonic sequence and then merging it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
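# Note: bitonic sort assumes the slice length is a power of two. It performs
# O(n log^2 n) comparisons, but the compare/swap network is data-independent,
# which is why the algorithm parallelizes well.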
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
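# mmsegmentation and transformers name the backbone parameters differently; the list built
# below maps every original key to its transformers counterpart.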
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load flax checkpoints in a PyTorch model"""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
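# Core conversion: flatten the Flax parameter tree, transpose conv/dense kernels where the
# two frameworks disagree on layout, and copy each tensor into the PyTorch state dict.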
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax stores (H, W, in, out); PyTorch expects (out, in, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear kernel: transpose the 2D matrix
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # layer norm "scale" maps to PyTorch "weight"
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
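
# Usage sketch (illustrative addition, not part of the original module): load a
# Flax UNet checkpoint into the matching PyTorch architecture. The model class
# and the msgpack file name below are assumptions for the example.
if __name__ == "__main__":
    from diffusers import UNet2DConditionModel

    pt_unet = UNet2DConditionModel()
    pt_unet = load_flax_checkpoint_in_pytorch_model(pt_unet, "diffusion_flax_model.msgpack")
    print(f"Loaded {sum(p.numel() for p in pt_unet.parameters()):,} parameters from Flax.")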
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
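
# Illustrative launch (added; the model choice and paths are examples only):
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 --num_train_epochs 2.0 \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli/ --save_steps -1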
def topological_sort(graph):
    """Kahn's algorithm: repeatedly emit vertices whose in-degree drops to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
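
# A small efficiency note (illustrative addition): `list.pop(0)` is O(n), while
# `collections.deque` gives O(1) pops from the left. A sketch of the same
# Kahn's algorithm that returns the order instead of printing it:
from collections import deque


def topological_sort_deque(graph):
    indegree = [0] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    queue = deque(v for v in range(len(graph)) if indegree[v] == 0)
    topo = []
    while queue:
        vertex = queue.popleft()
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    return topo if len(topo) == len(graph) else None  # None signals a cycle


assert topological_sort_deque(graph) == [0, 1, 2, 3, 4, 5]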
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image pipeline for Stable Diffusion that keeps images generated
    from the same seed similar across output resolutions."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
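
# Usage sketch (illustrative addition; the checkpoint name and sizes are
# assumptions, and the component layout must match the __init__ signature):
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe("an astronaut riding a horse", height=512, width=768, generator=generator).images[0]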
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
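
# Illustrative sanity checks (added; not in the original script):
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17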
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
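
# Behavior note (illustrative addition): with `_LazyModule`, importing the
# package itself is cheap; a heavy submodule is only imported on first
# attribute access, e.g. `from transformers.models.bridgetower import
# BridgeTowerModel` triggers the `modeling_bridgetower` (and hence torch)
# import at that point, not before.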
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # verify the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
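
# Illustrative invocation (added; the prompt and paths are examples only):
#   python retrieve.py --class_prompt "cat" --class_data_dir ./real_reg/samples_cat --num_class_images 200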
"""simple docstring"""
import unittest
from transformers import DonutProcessor
lowerCAmelCase__ = '''naver-clova-ix/donut-base'''
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = DonutProcessor.from_pretrained(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
_lowerCamelCase : Dict = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
_lowerCamelCase : Union[str, Any] = self.processor.tokenajson(__lowerCAmelCase )
self.assertDictEqual(__lowerCAmelCase , __lowerCAmelCase )
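
# Illustrative note (added, assumed behavior for a minimal sequence):
# `token2json` turns nested tags into nested dicts and "<sep/>"-separated
# repeated groups into lists, e.g.
#   processor.token2json("<s_a><s_b>x</s_b></s_a>")  ->  {"a": {"b": "x"}}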
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when passing images to OneFormerImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # row index 5

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
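    # Why 21 and 45 (illustrative note): the mask is flattened row-major and
    # runs of ones are encoded as 1-based (start, length) pairs. Row 0 begins
    # with 20 zeros, so the first run of ones starts at pixel 21; that run
    # spans the 30 trailing ones of row 0 plus the 15 leading ones of row 1,
    # giving a length of 45.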
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=58_101, decoder_vocab_size=None, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58_100, scale_embedding=False, pad_token_id=58_100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        # NOTE: assumes OnnxSeq2SeqConfigWithPast is the imported base class,
        # following the standard transformers seq2seq ONNX-config pattern.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
@property
    def atol_for_validation( self ):
return 1e-4
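# --- Editor addition: illustrative sketch, not part of the original module. ---
# The dummy-input helpers above pin dynamic (-1) axes to small fixed sizes so the
# ONNX export cannot specialize on them. A minimal stand-in for
# `compute_effective_axis_dimension` (the real helper lives in transformers.onnx):
def _effective_axis_dimension_sketch(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # dynamic axis requested
        return fixed_dimension - num_token_to_add
    return dimension

assert _effective_axis_dimension_sketch(-1, fixed_dimension=2) == 2
assert _effective_axis_dimension_sketch(-1, fixed_dimension=8, num_token_to_add=2) == 6
assert _effective_axis_dimension_sketch(16, fixed_dimension=2) == 16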
| 520 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
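# --- Editor addition: worked example of the layouts built above (assumed ids). ---
# build_inputs_with_special_tokens produces [CLS] A [SEP] (B [SEP]) and
# create_token_type_ids_from_sequences marks the A segment 0 and the B segment 1.
_cls, _sep = 101, 102  # hypothetical special-token ids, for illustration only
_a, _b = [7, 8], [9]
_input_ids = [_cls] + _a + [_sep] + _b + [_sep]
_token_type_ids = [0] * (len(_a) + 2) + [1] * (len(_b) + 1)
assert _input_ids == [101, 7, 8, 102, 9, 102]
assert len(_input_ids) == len(_token_type_ids)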
| 252 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
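# --- Editor addition: minimal sketch of the lazy-module pattern used above. ---
# Attribute access triggers the real import, so heavy optional backends (torch,
# tensorflow, flax) are only imported when one of their symbols is touched.
# This toy version is illustrative only; the real _LazyModule lives in
# transformers.utils.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule name: [exported names]}

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(submodule)
                return getattr(module, item)
        raise AttributeError(item)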
| 705 |
'''simple docstring'''
from typing import Any
class lowerCAmelCase :
    def __init__( self , data ):
        self.data = data
        self.next = None
def __repr__( self ):
return f"Node({self.data})"
class lowerCAmelCase :
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ): # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ): # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1: # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list( ) -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError # This should not happen.
    except IndexError:
        assert True # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError # This should not happen.
    except IndexError:
        assert True # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ) -> None:
    """simple docstring"""
    test_input = [
        -9,
        1_00,
        Node(77_34_51_12 ),
        'dlrow olleH',
        7,
        55_55,
        0,
        -1_92.5_55_55,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
    print(linked_list )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input('Enter New Value: ' ).strip()
print('New list:' )
    print(linked_list )
    print(F"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
| 646 | 0 |
def find_min(arr ):
    '''simple docstring'''
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # carry over the previous row (subset without arr[i - 1])
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
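# Editor addition: quick demonstration. For [1, 6, 11, 5] (total 23) the best
# split is {1, 5, 6} vs {11}, so the minimum subset-sum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # expected: 1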
| 187 |
def odd_even_transposition(arr ):
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i] , arr[i + 1] = arr[i + 1] , arr[i]
    return arr
if __name__ == "__main__":
A = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 187 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt-neox-20b""": 2048,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids( self , conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
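# --- Editor addition: standalone sketch of the left-truncation rule above. ---
# When the concatenated conversation exceeds the context window, only the most
# recent model_max_length token ids are kept. Numbers here are made up.
_model_max_length = 8
_history = list(range(12))  # 12 token ids, window of 8
if len(_history) > _model_max_length:
    _history = _history[-_model_max_length:]
assert _history == list(range(4, 12))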
| 582 |
def nand_gate(input_1: int , input_2: int ) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate( ) -> None:
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
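    # Editor addition: NAND is the negation of AND; a quick property check.
    assert all(nand_gate(a, b) == int(not (a and b)) for a in (0, 1) for b in (0, 1))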
| 582 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp( self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    def test_inputs_embeds( self ):
        pass
    def test_save_load_fast_init_from_base( self ):
        pass
    def test_save_load_fast_init_to_base( self ):
        pass
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
    def test_retain_grad_hidden_states_attentions( self ):
        return
    @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        pass
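# --- Editor addition: the cache-consistency property checked above, in miniature. ---
# Decoding one new token with past_key_values must reproduce the corresponding
# column of a full forward pass; this toy running-sum "model" shows the same
# incremental-vs-recompute contract without any transformers dependency.
def _run_sum(xs, cache=0):
    for x in xs:
        cache += x
    return cache

assert _run_sum([1, 2, 3, 4]) == _run_sum([4], cache=_run_sum([1, 2, 3]))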
| 463 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImgaImgPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionControlNetImgaImgMultiPipelineFastTests(
    PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 1_0.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="np" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
        assert np.abs(expected_image - image ).max() < 9e-2
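# --- Editor addition: assumed semantics of the control-guidance windows tested
# above. Each ControlNet is only applied while the denoising progress (0.0 at the
# first step, 1.0 at the last) lies inside [start, end]; this tiny helper is an
# illustration of that gating, not the diffusers implementation.
def _controlnet_keep(progress: float, start: float, end: float) -> float:
    return 1.0 if start <= progress <= end else 0.0

assert _controlnet_keep(0.15, 0.1, 0.2) == 1.0
assert _controlnet_keep(0.50, 0.1, 0.2) == 0.0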
| 463 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class ChrF( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
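# --- Editor addition: usage sketch (requires the `datasets` and `sacrebleu`
# packages; inputs follow the transposed reference layout described in the
# docstring above). Commented out because it needs the runtime installed.
# chrf = datasets.load_metric("chrf")
# results = chrf.compute(
#     predictions=["The cat sat on the mat."],
#     references=[["The cat sat on the mat."]],
#     word_order=2,  # chrF++
# )
# print(results["score"])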
| 265 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
'''simple docstring'''
    def _check_no_duplicates_on_constructed_node( self , node ):
        '''simple docstring'''
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" )
    def construct_mapping( self , node , deep=False ):
        '''simple docstring'''
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content ):
    '''simple docstring'''
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
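# Editor addition: worked example of the splitter above. Front matter between the
# first two `---` lines is returned as the YAML block, the rest as the body.
assert _split_yaml_from_readme("---\nfoo: 1\n---\nbody") == ("foo: 1", "body")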
class DatasetMetadata( dict ):
    '''simple docstring'''
    _FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        '''simple docstring'''
        with open(path , encoding="""utf-8""" ) as readme_file:
            yaml_string , content = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path: Path ):
        '''simple docstring'''
        if path.exists():
            with open(path , encoding="""utf-8""" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
@classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        '''simple docstring'''
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
lowerCamelCase_ : str = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 265 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''xlm'''
    attribute_map = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
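# --- Editor addition: usage sketch. The attribute_map above aliases the generic
# PretrainedConfig names onto XLM's own fields, so both spellings resolve
# identically. (Commented out because it needs the transformers runtime.)
# config = XLMConfig()
# assert config.hidden_size == config.emb_dim == 2048
# assert config.num_attention_heads == config.n_heads == 16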
| 82 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset ):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer(s ):
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(" " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s ):
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_f1(a_gold , a_pred ):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
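# Editor addition: worked example. normalize_answer() drops the article "the",
# so both sides tokenize to ["cat"]: precision = recall = F1 = 1.0.
assert compute_f1("the cat", "cat") == 1.0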
def get_raw_scores(dataset , preds ):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                f1_scores[qid] = max(compute_f1(a , a_pred ) for a in gold_answers )
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
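# Example invocation (file names below are illustrative, not from this repo):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json
#
# When --na-prob-file is given, the script also sweeps the no-answer threshold
# and reports the best achievable "exact" and "f1" scores.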
| 82 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
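# Illustrative walk through rename_key (the input key is assumed, not taken from
# a real checkpoint): "model.transformer_0.norm1.weight" loses the "model."
# prefix, "norm1" becomes "attention.output.LayerNorm", "transformer_0" becomes
# "encoder.layer.0", and the "yoso." prefix is added, yielding
# "yoso.encoder.layer.0.attention.output.LayerNorm.weight".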
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
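    # Example command line (script name and paths are placeholders):
    #   python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
    #       --config_file config.json --pytorch_dump_path ./yoso-hf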
| 715 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 13 | 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
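    # Illustrative call (the concentrations in cm^-3 are assumptions; silicon at
    # 300 K has an intrinsic carrier concentration of roughly 1.5e10 cm^-3):
    # builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
    # returns roughly 0.8 V.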
| 29 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
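    # Sanity check: 2**7 - 1 = 127 is prime, so the first call prints True;
    # 2**11 - 1 = 2047 = 23 * 89 is composite, so the second prints False.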
| 79 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Sorts the first `n` elements of `collection` in place, recursively."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Inserts collection[index] into its sorted position among the elements before it."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
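    # Note: like the iterative version, this sorts in O(n^2) comparisons in the
    # worst case, and the recursion depth grows linearly with the input size, so
    # very long inputs can hit Python's recursion limit.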
| 372 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
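# A sample pair produced above (for layer 0): ("visual_encoder.blocks.0.attn.qkv.weight",
# "vision_model.encoder.layers.0.self_attn.qkv.weight").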
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
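    # Example conversion run (script name and paths are placeholders):
    #   python convert_blip2_checkpoint.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b-hf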
| 493 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 493 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
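# Illustrative mapping (the fairseq key is assumed, not read from a real
# checkpoint): "decoder.layers.3.moe_layer.experts.0.fc1.weight" with
# expert_idx=7 becomes "decoder.layers.3.ffn.experts.expert_7.fc1.weight".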
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 721 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 329 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
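# Minimal usage sketch (the environment variable names below are made up for
# illustration):
#
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
#   parse_flag_from_env("MY_DEBUG_FLAG", default=False)
#   parse_choice_from_env("MY_PRECISION", "no")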
| 222 |
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extends `key` cyclically until it is as long as `message`."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypts `message` letter by letter: c = (p - k) mod 26."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypts `cipher_text` letter by letter: p = (c + k) mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
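    # Encryption maps p -> (p - k) mod 26 and decryption maps c -> (c + k) mod 26
    # with the key extended to the message length, so original_text(cipher_text(m,
    # k), k) always round-trips back to m (spaces are passed through unchanged).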
| 222 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
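# For example, downscale_height_and_width(768, 768) returns (768, 768) because
# 768 is already a multiple of 8**2, while (500, 500) is rounded up to (512, 512).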
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet conditioning.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 721 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test the OpenAIGPTTokenizer with Spacy and ftfy."""

    pass
| 287 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( _A ):
__lowerCamelCase : Dict =(DDIMParallelScheduler,)
__lowerCamelCase : Optional[int] =(("eta", 0.0), ("num_inference_steps", 50))
def UpperCamelCase_ ( self : str , **__lowercase : Optional[int] ):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : str ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10, 0.0
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for t in scheduler.timesteps:
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowercase )
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(steps_offset=1 )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowercase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowercase , prediction_type=__lowercase , sample_max_value=__lowercase , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__lowercase , num_inference_steps=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowercase , eta=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
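

# --- Hedged usage sketch (not part of the test class above) ------------------
# How batch_step_no_noise is typically driven: several denoising trajectories
# are stacked on a leading "parallel" axis and flattened into one batched step.
# The random tensors below stand in for a UNet forward pass.
import torch
from diffusers import DDIMParallelScheduler

_scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
_scheduler.set_timesteps(10)
_parallel, _batch = 3, 2
_latents = torch.randn(_parallel, _batch, 4, 8, 8)
_timesteps = _scheduler.timesteps[:_parallel, None].repeat(1, _batch)
_noise_pred = torch.randn_like(_latents)  # stand-in for model(latents, t)
_prev = _scheduler.batch_step_no_noise(
    _noise_pred.flatten(0, 1), _timesteps.flatten(0, 1), _latents.flatten(0, 1), 0.0
)
print(_prev.shape)  # torch.Size([6, 4, 8, 8])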
| 225 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd , strict=False )  # tolerate missing/unexpected keys
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan ( x , model ):
    z , _ , _ = model.encode(x )
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str ( string , reload=False ):
    module , cls = string.rsplit('''.''' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config ( config ):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''' )
    return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def load_model_from_config ( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='''cpu''' )
        global_step = pl_sd['''global_step''']
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=gpu , eval_mode=eval_mode )['''model''']
    return model, global_step
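

# --- Hedged usage sketch ------------------------------------------------------
# Driving the loaders above end to end; the default YAML/checkpoint paths are
# assumptions baked into load_vqgan and may not exist in your checkout.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # falls back to ./model_checkpoints defaults
    x = torch.randn(1, 3, 256, 256, device=device)  # dummy image batch
    xrec = reconstruct_with_vqgan(x, vqgan)
    print(xrec.shape)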
| 231 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: List[str] = logging.get_logger(__name__)
UpperCamelCase__: Dict = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """xlm-prophetnet"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """num_encoder_attention_heads""",
    }
    def __init__( self : Any , activation_dropout : Optional[float] = 0.1 , activation_function : Optional[Union[str, Callable]] = "gelu" , vocab_size : Optional[int] = 30522 , hidden_size : Optional[int] = 1024 , encoder_ffn_dim : Optional[int] = 4096 , num_encoder_layers : Optional[int] = 12 , num_encoder_attention_heads : Optional[int] = 16 , decoder_ffn_dim : Optional[int] = 4096 , num_decoder_layers : Optional[int] = 12 , num_decoder_attention_heads : Optional[int] = 16 , attention_dropout : Optional[float] = 0.1 , dropout : Optional[float] = 0.1 , max_position_embeddings : Optional[int] = 512 , init_std : Optional[float] = 0.02 , is_encoder_decoder : Optional[bool] = True , add_cross_attention : Optional[bool] = True , decoder_start_token_id : Optional[int] = 0 , ngram : Optional[int] = 2 , num_buckets : Optional[int] = 32 , relative_max_distance : Optional[int] = 128 , disable_ngram_loss : Optional[bool] = False , eps : Optional[float] = 0.0 , use_cache : Optional[bool] = True , pad_token_id : Optional[int] = 0 , bos_token_id : Optional[int] = 1 , eos_token_id : Optional[int] = 2 , **kwargs : Dict , ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers( self : List[Any] ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers( self : Any , value : Optional[Any] ) -> None:
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
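

# --- Hedged sketch --------------------------------------------------------------
# Instantiating the config above and checking the derived num_hidden_layers
# property (encoder plus decoder layers).
if __name__ == "__main__":
    config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    assert config.num_hidden_layers == 12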
| 528 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCamelCase__: int = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
UpperCamelCase__: Dict = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
UpperCamelCase__: str = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc ( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE( datasets.Metric ):
"""simple docstring"""
    def _info( self : Optional[Any] ) -> Dict:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types( self : Union[str, Any] ) -> Dict:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 528 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def create_rename_keys ( encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v ( state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_024
        encoder_config.intermediate_size = 4_096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1_024
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1_024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50_265] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
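    # Hedged usage note: the conversion above is driven from the command line, e.g.
    #   python convert_trocr_unilm_to_pytorch.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
    #       --pytorch_dump_folder_path ./trocr-base-handwritten
    # The script file name here is an assumption based on the checkpoint naming.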
| 467 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=_snake_case ):
UpperCAmelCase = ["speech"]
def __init__( self : List[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ):
requires_backends(self , ['''speech'''] )
class UpperCAmelCase ( metaclass=_snake_case ):
UpperCAmelCase = ["speech"]
def __init__( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ):
requires_backends(self , ['''speech'''] )
| 467 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase = 0
@slow
def A__ ( self )-> Tuple:
'''simple docstring'''
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPT2Tokenizer, GPT2TokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
def A__ ( self )-> Dict:
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def A__ ( self )-> Dict:
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
def A__ ( self )-> List[Any]:
'''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def A__ ( self )-> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.txt''' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type='''bert''' , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''merges.txt''' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type='''gpt2''' , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_tokenizers
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.txt''' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type='''bert''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''merges.txt''' ) )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> int:
'''simple docstring'''
        with pytest.raises(ValueError ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCamelCase = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE_ )
else:
self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def A__ ( self )-> int:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
__UpperCamelCase = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def A__ ( self )-> Dict:
'''simple docstring'''
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def A__ ( self )-> Tuple:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , SCREAMING_SNAKE_CASE_ )
@require_tokenizers
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
        sample = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = get_tokenizer_config('''bert-base-cased''' )
__UpperCamelCase = config.pop('''_commit_hash''' , SCREAMING_SNAKE_CASE_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCamelCase = get_tokenizer_config(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(SCREAMING_SNAKE_CASE_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = get_tokenizer_config(SCREAMING_SNAKE_CASE_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def A__ ( self )-> List[Any]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ )
# Can register in two steps
AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ )
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A__ ( self )-> Tuple:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def A__ ( self )-> int:
'''simple docstring'''
        class NewTokenizer( BertTokenizer ):
            """simple docstring"""
            special_attribute_present = False

        class NewTokenizerFast( BertTokenizerFast ):
            """simple docstring"""
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
def A__ ( self )-> Tuple:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
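

# --- Hedged sketch of the register flow exercised above --------------------------
# Pairing a custom config with a custom slow tokenizer so AutoTokenizer can
# resolve it; real code should also clean up the registries as the tests do.
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    print(TOKENIZER_MAPPING[CustomConfig])  # (CustomTokenizer, None)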
| 451 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase__ : List[str] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 451 | 1 |
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
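
    # --- Hedged illustration of the rolling hash ---------------------------------
    # A string is read as a base-`alphabet_size` number modulo `modulus`; the hash
    # of "bc" derives from the hash of "ab" in O(1):
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    h_bc = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert h_bc == (ord("b") * alphabet_size + ord("c")) % modulus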
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down( self : Dict ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
    def test_image_processor_from_pretrained_subfolder( self : Dict ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
        self.assertIsNotNone(config )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
    def setUpClass( cls : str ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
    def test_push_to_hub( self : Union[str, Any] ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="test-image-processor" , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self : Union[str, Any] ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="valid_org/test-image-processor-org" , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self : int ):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
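

# --- Hedged local round-trip sketch (no Hub access required) ---------------------
# save_pretrained followed by from_pretrained preserves the attributes the push
# tests above compare field by field.
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_dir:
        ip = ViTImageProcessor()
        ip.save_pretrained(tmp_dir)
        ip2 = ViTImageProcessor.from_pretrained(tmp_dir)
    assert ip.to_dict() == ip2.to_dict()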
| 684 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
    cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
        cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
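    # Added aside (not part of the original script): `shlex.quote`, used when building
    # the remote command above, makes arbitrary argument strings safe to splice into a
    # shell command by single-quoting anything containing metacharacters, e.g.:
    #   shlex.quote("plain")      -> "plain"
    #   shlex.quote("two words")  -> "'two words'"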
| 704 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
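
# Added illustration (requires bs4, as the feature extractor does) of the node
# extraction performed under the hood; the xpath bookkeeping is omitted here.
# from bs4 import BeautifulSoup
# soup = BeautifulSoup("<h1>My First Heading</h1><p>My first paragraph.</p>", "html.parser")
# list(soup.stripped_strings)  # -> ["My First Heading", "My first paragraph."]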
| 618 | 0 |
"""simple docstring"""
def _a ( UpperCAmelCase__ = 2_00_00_00 ) -> str:
__SCREAMING_SNAKE_CASE = [0 for i in range(n + 1 )]
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , _SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
for i in range(_SCREAMING_SNAKE_CASE ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
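    # Added cross-check: agree with naive trial division for a small bound.
    assert solution(100) == sum(k for k in range(100) if k > 1 and all(k % d for d in range(2, int(k**0.5) + 1)))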
| 482 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A : List[str] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A : List[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A : int = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
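
# Added example of the normalization pipeline above: casing, punctuation,
# articles and extra whitespace are all stripped.
assert normalize_answer("The  Cat!") == "cat"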
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
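
# Added worked example: the first prediction matches its reference after
# normalization, the second does not, so the exact-match score is 50.0.
assert compute_em(["The cat.", "a dog"], [["cat"], ["bird"]]) == 50.0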
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgrams = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgrams)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the SARI metric.
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False, ):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
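
# Added illustration of the reference transposition above: per-prediction reference
# lists are transposed into sacrebleu's per-annotator layout.
_refs = [["r1 for p1", "r2 for p1"], ["r1 for p2", "r2 for p2"]]
assert [[r[i] for r in _refs] for i in range(2)] == [["r1 for p1", "r1 for p2"], ["r2 for p1", "r2 for p2"]]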
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
return result
| 602 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1_024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
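
# Added usage sketch (assumes this module is importable as in `transformers`):
# the hybrid path falls back to a default BiT backbone when none is supplied.
# config = DPTConfig(is_hybrid=True)
# config.to_dict()["model_type"]  # -> "dpt"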
| 702 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors (aliquot sum) of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
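    # Added spot check: 6 is a perfect number, so its aliquot sum is 1 + 2 + 3 = 6.
    assert sum_of_divisors(6) == 6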
| 162 | 0 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number, with an optional +91 prefix."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
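    # Added spot checks against the pattern above:
    assert indian_phone_validator("+918827897895") is True
    assert indian_phone_validator("+911234567890") is False  # first subscriber digit must be 7/8/9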
| 313 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
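    # Added usage note: `fire` maps positional CLI args to pred_path / tgt_path and
    # flags to keyword args, e.g. (hypothetical file name):
    #   python rouge_cli.py preds.txt targets.txt --save_path=metrics.json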
| 313 | 1 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as fraction strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else "1")
    return series
if __name__ == "__main__":
lowerCAmelCase_ : Dict = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
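    # Added example: harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]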
| 461 | '''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
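
# Added note on the pmap data layout used above: `shard` splits the leading batch
# axis across local devices, while `replicate` copies a pytree (here: params) onto
# every device, e.g.:
#   batch = np.zeros((jax.device_count() * 2, 3))
#   shard(batch).shape  # -> (jax.device_count(), 2, 3)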
| 461 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((2_56, 2_56))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        init_image = init_image.resize((5_12, 5_12))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_00, height=5_12, width=5_12, strength=0.5, output_type="np", )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image) | 327 |
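# Added aside on the hint preprocessing in the integration test above: an HxWxC
# uint8 image becomes a 1xCxHxW float tensor in [0, 1]:
#   t = torch.from_numpy(np.array(pil_image)).float() / 255.0
#   t = t.permute(2, 0, 1).unsqueeze(0)   # HWC -> CHW, then add a batch axis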
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(self, vocab_size=2_11_28, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache | 327 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
@cached_property
    def big_tokenizer(self):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {
'''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='facebook/xglm-564M', padding=False, ) | 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 300 | 0 |
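# Added note on create_token_type_ids_from_sequences above: it produces the standard
# BERT segment layout, e.g. 3 A-tokens and 2 B-tokens give
#   [0] * (1 + 3 + 1) + [1] * (2 + 1) == [0, 0, 0, 0, 0, 1, 1, 1]
# (the extra positions cover [CLS] and the two [SEP] tokens).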
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, input_dims: int = 1_28, targets_length: int = 2_56, max_decoder_noise_time: float = 2000.0, d_model: int = 7_68, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 20_48, dropout_rate: float = 0.1, ):
super().__init__()
__lowerCamelCase = nn.Sequential(
nn.Linear(SCREAMING_SNAKE_CASE__ , d_model * 4 , bias=SCREAMING_SNAKE_CASE__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=SCREAMING_SNAKE_CASE__ ) , nn.SiLU() , )
__lowerCamelCase = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = False
__lowerCamelCase = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE__ ):
# FiLM conditional T5 decoder
__lowerCamelCase = DecoderLayer(d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ )
self.decoders.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
    def encoder_decoder_mask(self, query_input, key_input):
__lowerCamelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase = self.conditioning_emb(SCREAMING_SNAKE_CASE__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase = torch.broadcast_to(
torch.arange(SCREAMING_SNAKE_CASE__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase = self.position_encoding(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.continuous_inputs_projection(SCREAMING_SNAKE_CASE__ )
inputs += position_encodings
__lowerCamelCase = self.dropout(SCREAMING_SNAKE_CASE__ )
# decoder: No padding present.
__lowerCamelCase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase = [(x, self.encoder_decoder_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase = lyr(
SCREAMING_SNAKE_CASE__ , conditioning_emb=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , )[0]
__lowerCamelCase = self.decoder_norm(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.post_dropout(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.spec_out(SCREAMING_SNAKE_CASE__ )
return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
super().__init__()
__lowerCamelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ ) )
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
__lowerCamelCase = self.layer[0](
SCREAMING_SNAKE_CASE__ , conditioning_emb=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , )
if encoder_hidden_states is not None:
__lowerCamelCase = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase = self.layer[1](
SCREAMING_SNAKE_CASE__ , key_value_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase = self.layer[-1](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
super().__init__()
__lowerCamelCase = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = Attention(query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , out_bias=SCREAMING_SNAKE_CASE__ , scale_qk=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.Dropout(SCREAMING_SNAKE_CASE__ )
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
# pre_self_attention_layer_norm
__lowerCamelCase = self.layer_norm(SCREAMING_SNAKE_CASE__ )
if conditioning_emb is not None:
__lowerCamelCase = self.FiLMLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Self-attention block
__lowerCamelCase = self.attention(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ )
return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
super().__init__()
__lowerCamelCase = Attention(query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , out_bias=SCREAMING_SNAKE_CASE__ , scale_qk=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TaLayerNorm(SCREAMING_SNAKE_CASE__ , eps=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.Dropout(SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Union[str, Any]:
__lowerCamelCase = self.layer_norm(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.attention(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ )
return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # two distinct input projections: a gated (GELU) branch and a linear branch
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467); the variance is
        # therefore computed without subtracting the mean, and there is no bias. We also make
        # sure that the accumulation for half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU, as used in Google BERT / T5
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation, https://arxiv.org/abs/1709.07871."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
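# --- Illustrative example (not part of the original file) ---------------------
# The FiLM (feature-wise linear modulation) pattern used by TaFiLMLayer above:
# a conditioning vector is projected to per-channel (scale, shift) pairs and
# applied as x * (1 + scale) + shift. This standalone sketch assumes only torch;
# all names and shapes are illustrative.
import torch
import torch.nn as nn


class FiLMDemo(nn.Module):
    def __init__(self, cond_dim: int, num_channels: int):
        super().__init__()
        # a single linear layer emits both scale and shift for every channel
        self.scale_bias = nn.Linear(cond_dim, num_channels * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift


if __name__ == "__main__":
    film = FiLMDemo(cond_dim=32, num_channels=8)
    x = torch.randn(4, 10, 8)     # (batch, sequence, channels)
    cond = torch.randn(4, 1, 32)  # broadcast over the sequence dimension
    print(film(x, cond).shape)    # torch.Size([4, 10, 8])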
| 298 |
def kth_permutation(k: int, n: int) -> list:
    """
    Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1],
    computed with the factorial number system.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation digit by digit (Lehmer code)
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
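# --- Illustrative cross-check (not part of the original file) -----------------
# `itertools.permutations` enumerates permutations of a sorted input in
# lexicographic order, so index k must match kth_permutation(k, n).
if __name__ == "__main__":
    from itertools import permutations

    n = 4
    for k, expected in enumerate(permutations(range(n))):
        assert kth_permutation(k, n) == list(expected)
    print(f"kth_permutation agrees with itertools for all {n}! permutations")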
| 298 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 707 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
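# Example invocation (paths are illustrative placeholders, not from the original):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin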
| 499 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)
class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #     constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #     constraint: self.regularization >= ln >= 0
        #     and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual objective, so it can be minimized."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
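# --- Illustrative usage (not part of the original file) -----------------------
# A tiny linearly separable problem; labels must be in {-1, +1} for the
# prediction rule above. The data points are made up for demonstration.
if __name__ == "__main__":
    xs = [
        np.asarray([0, 1]),
        np.asarray([0, 2]),
        np.asarray([1, 1]),
        np.asarray([1, 2]),
    ]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0, 1])))  # expected: 1
    print(svc.predict(np.asarray([1, 1])))  # expected: -1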
| 35 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = f'This example requires a minimum version of {min_version},'
        error_message += f' but the version found is {__version__}.\n'
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''')
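# --- Illustrative usage (not part of the original file) -----------------------
# Example scripts call this guard at import time to fail fast on incompatible
# installs; the version strings below are made up.
if __name__ == "__main__":
    check_min_version("4.0.0")  # passes silently on any reasonably recent install
    try:
        check_min_version("999.0.0")
    except ImportError as err:
        print(f"version guard works: {err}")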
| 212 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we do not assign this task to anyone in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
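# --- Illustrative cross-check (not part of the original file) -----------------
# Brute force over all one-to-one assignments of persons to distinct tasks must
# agree with the bitmask DP count.
if __name__ == "__main__":
    from itertools import permutations

    def brute_force(tasks_by_person, num_tasks):
        count = 0
        num_persons = len(tasks_by_person)
        for tasks in permutations(range(1, num_tasks + 1), num_persons):
            if all(tasks[p] in tasks_by_person[p] for p in range(num_persons)):
                count += 1
        return count

    assert AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
        task_performed
    ) == brute_force(task_performed, total_tasks)
    print("bitmask DP matches brute force")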
| 140 |
| 140 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
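# Illustrative note (not part of the original file): the lazy module makes
# importing the package cheap; heavy submodules are imported only on first
# attribute access, e.g.
#
#   import transformers.models.autoformer as autoformer  # fast, nothing heavy yet
#   model_cls = autoformer.AutoformerModel               # triggers the real import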
| 232 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 104 | 0 |
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
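# --- Illustrative sanity check (not part of the original file) ----------------
# WER = (S + D + I) / (S + D + C), computed here directly with jiwer.
if __name__ == "__main__":
    measures = compute_measures("this is the reference", "this is the prediction")
    s, d, i = measures["substitutions"], measures["deletions"], measures["insertions"]
    n = s + d + measures["hits"]
    print((s + d + i) / n)  # 0.25: one substitution over four reference words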
| 652 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 1 |